from __future__ import print_function
from __future__ import absolute_import
import numpy as np
def normalize_word(word):
new_word = ''
for char in word:
if char.isdigit():
new_word += '0'
else:
new_word += char
return new_word
def read_instance(input_file, word_alphabet, label_alphabet, number_normalized, max_sent_length, split_token='\t'):
in_lines = open(input_file, 'r', encoding='utf-8').readlines()
instance_texts, instance_Ids = [], []
doc_id = ''
words, labels = [], []
word_Ids, label_Ids = [], []
# for sequence labeling data format i.e. CoNLL 2003
for line in in_lines:
if not doc_id:
doc_id = line.strip()
continue
if len(line) > 2:
pairs = line.strip('\n').split(split_token)
word = pairs[0]
words.append(word)
if number_normalized:
word = normalize_word(word)
label = pairs[-1]
labels.append(label)
word_Ids.append(word_alphabet.get_index(word))
label_Ids.append(label_alphabet.get_index(label))
else:
if (len(words) > 0) and ((max_sent_length < 0) or (len(words) < max_sent_length)):
# get sent_word_Ids_list (split with '.')
period_id = word_alphabet.get_index('#####')
sent_word_Ids_list = []
idx = 0
sent_word_Ids = []
while idx <= len(word_Ids) - 1:
sent_word_Ids.append(word_Ids[idx])
if word_Ids[idx] == period_id:
sent_word_Ids_list.append(sent_word_Ids)
sent_word_Ids = []
idx += 1
if sent_word_Ids:
sent_word_Ids_list.append(sent_word_Ids)
instance_texts.append([words, labels, doc_id])
instance_Ids.append([word_Ids, sent_word_Ids_list, label_Ids])
doc_id = ''
words, labels = [], []
word_Ids, label_Ids = [], []
return instance_texts, instance_Ids
def build_pretrain_embedding(embedding_path, word_alphabet, embedd_dim=100, norm=True):
embedd_dict = dict()
    if embedding_path is not None:
embedd_dict, embedd_dim = load_pretrain_emb(embedding_path)
alphabet_size = word_alphabet.size()
scale = np.sqrt(3.0 / embedd_dim)
pretrain_emb = np.empty([word_alphabet.size(), embedd_dim])
perfect_match = 0
case_match = 0
not_match = 0
for word, index in word_alphabet.iteritems():
if word in embedd_dict:
if norm:
pretrain_emb[index, :] = norm2one(embedd_dict[word])
else:
pretrain_emb[index, :] = embedd_dict[word]
perfect_match += 1
elif word.lower() in embedd_dict:
if norm:
pretrain_emb[index, :] = norm2one(embedd_dict[word.lower()])
else:
pretrain_emb[index, :] = embedd_dict[word.lower()]
case_match += 1
else:
pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embedd_dim])
not_match += 1
pretrained_size = len(embedd_dict)
    print('Embedding:\n pretrain word:%s, perfect match:%s, case_match:%s, oov:%s, oov%%:%s' % (
pretrained_size, perfect_match, case_match, not_match, (not_match + 0.) / alphabet_size))
return pretrain_emb, embedd_dim
def norm2one(vec):
root_sum_square = np.sqrt(np.sum(np.square(vec)))
return vec / root_sum_square
def load_pretrain_emb(embedding_path):
embedd_dim = -1
embedd_dict = dict()
with open(embedding_path, 'r', encoding='ISO-8859-1') as file:
for line in file:
line = line.strip()
if len(line) == 0:
continue
tokens = line.split()
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
elif embedd_dim + 1 != len(tokens):
# ignore illegal embedding line
continue
# assert (embedd_dim + 1 == len(tokens))
embedd = np.empty([1, embedd_dim])
embedd[:] = tokens[1:]
first_col = tokens[0]
embedd_dict[first_col] = embedd
return embedd_dict, embedd_dim
if __name__ == '__main__':
a = np.arange(9.0)
print(a)
    print(norm2one(a))

# ---| end of file: doc_event/utils/functions.py | package: zzsn-nlp |---
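# --- Editor's usage sketch for doc_event/utils/functions.py above (illustrative only) ---
# A minimal demonstration of the two small helpers, assuming the package is importable
# as doc_event.utils.functions; otherwise copy the two functions into the same script.
import numpy as np
from doc_event.utils.functions import normalize_word, norm2one

print(normalize_word('CVE-2021-44228'))   # digits collapse to '0' -> 'CVE-0000-00000'
v = np.array([3.0, 4.0])
print(norm2one(v))                        # unit-L2-norm vector -> [0.6 0.8]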
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
from doc_event.model.wordsequence import WordSequence
from doc_event.model.crf import CRF
class SeqLabel(nn.Module):
def __init__(self, data):
super(SeqLabel, self).__init__()
self.use_crf = data.use_crf
print('build sequence labeling network...')
print('word feature extractor: ', data.word_feature_extractor)
print('use crf: ', self.use_crf)
self.gpu = data.HP_gpu
self.average_batch = data.average_batch_loss
        # add two more labels for the down-layer LSTM; keep the original label size for the CRF
label_size = data.label_alphabet_size
data.label_alphabet_size += 2
self.word_hidden = WordSequence(data)
if self.use_crf:
self.crf = CRF(label_size, self.gpu)
def calculate_loss(self, word_inputs, word_seq_lengths, list_sent_words_tensor, batch_label, mask):
outs = self.word_hidden(word_inputs, list_sent_words_tensor, word_seq_lengths)
batch_size = word_inputs.size(0)
seq_len = word_inputs.size(1)
if self.use_crf:
total_loss = self.crf.neg_log_likelihood_loss(outs, mask, batch_label)
scores, tag_seq = self.crf._viterbi_decode(outs, mask)
else:
loss_function = nn.NLLLoss(ignore_index=0, size_average=False)
outs = outs.view(batch_size * seq_len, -1)
score = F.log_softmax(outs, 1)
total_loss = loss_function(score, batch_label.view(batch_size * seq_len))
_, tag_seq = torch.max(score, 1)
tag_seq = tag_seq.view(batch_size, seq_len)
if self.average_batch:
total_loss = total_loss / batch_size
return total_loss, tag_seq
def forward(self, word_inputs, word_seq_lengths, list_sent_words_tensor, mask):
outs = self.word_hidden(word_inputs, list_sent_words_tensor, word_seq_lengths)
batch_size = word_inputs.size(0)
seq_len = word_inputs.size(1)
if self.use_crf:
scores, tag_seq = self.crf._viterbi_decode(outs, mask)
else:
outs = outs.view(batch_size * seq_len, -1)
_, tag_seq = torch.max(outs, 1)
tag_seq = tag_seq.view(batch_size, seq_len)
# filter padded position with zero
tag_seq = mask.long() * tag_seq
        return tag_seq

# ---| end of file: doc_event/model/seqlabel.py | package: zzsn-nlp |---
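# --- Editor's sketch for doc_event/model/seqlabel.py above (illustrative only) ---
# The non-CRF branch of calculate_loss flattens the (batch, seq_len, n_labels) scores,
# applies log_softmax and a summed NLLLoss that ignores the padding label 0.
# All shapes and tensors below are made-up stand-ins, not the real WordSequence output.
import torch
import torch.nn as nn
import torch.nn.functional as F

batch_size, seq_len, n_labels = 2, 5, 4
outs = torch.randn(batch_size, seq_len, n_labels)            # fake emission scores
batch_label = torch.randint(1, n_labels, (batch_size, seq_len))
batch_label[1, 3:] = 0                                        # padded positions carry label 0

loss_fn = nn.NLLLoss(ignore_index=0, reduction='sum')         # modern spelling of size_average=False
score = F.log_softmax(outs.view(batch_size * seq_len, -1), 1)
total_loss = loss_fn(score, batch_label.view(batch_size * seq_len))
_, tag_seq = torch.max(score, 1)
print(total_loss.item(), tag_seq.view(batch_size, seq_len).shape)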
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import numpy as np
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from doc_event.model.wordrep import WordRep
seed_num = 42
torch.manual_seed(seed_num)
torch.cuda.manual_seed_all(seed_num)
torch.backends.cudnn.deterministic = True
class WordSequence(nn.Module):
def __init__(self, data):
super(WordSequence, self).__init__()
print('build word sequence feature extractor: %s...' % (data.word_feature_extractor))
self.gpu = data.HP_gpu
# self.batch_size = data.HP_batch_size
# self.hidden_dim = data.HP_hidden_dim
self.droplstm = nn.Dropout(data.HP_dropout)
self.droplstm_sent = nn.Dropout(data.HP_dropout - 0.1)
self.bilstm_flag = data.HP_bilstm
self.lstm_layer = data.HP_lstm_layer
self.wordrep = WordRep(data)
self.input_size = data.word_emb_dim
# bert fea size
if data.use_bert:
self.input_size += 768
# The LSTM takes word embeddings as inputs, and outputs hidden states
# with dimensionality hidden_dim.
if self.bilstm_flag:
self.lstm_hidden = data.HP_hidden_dim // 2
else:
self.lstm_hidden = data.HP_hidden_dim
self.lstm = nn.LSTM(self.input_size, self.lstm_hidden, num_layers=self.lstm_layer,
batch_first=True, bidirectional=self.bilstm_flag)
self.sent_lstm = nn.LSTM(self.input_size, self.lstm_hidden, num_layers=self.lstm_layer,
batch_first=True, bidirectional=self.bilstm_flag)
self.lstm2 = nn.LSTM(self.lstm_hidden * 2, self.lstm_hidden, num_layers=self.lstm_layer,
batch_first=True, bidirectional=self.bilstm_flag)
# The linear layer that maps from hidden state space to tag space
self.hidden2tag = nn.Linear(data.HP_hidden_dim, data.label_alphabet_size)
self.hidden2tag_sent_level = nn.Linear(data.HP_hidden_dim, data.label_alphabet_size)
self.gate = nn.Linear(data.HP_hidden_dim * 2, data.HP_hidden_dim)
self.sigmoid = nn.Sigmoid()
if self.gpu:
self.droplstm = self.droplstm.cuda()
self.droplstm_sent = self.droplstm_sent.cuda()
self.hidden2tag = self.hidden2tag.cuda()
self.hidden2tag_sent_level = self.hidden2tag_sent_level.cuda()
self.lstm = self.lstm.cuda()
self.sent_lstm = self.sent_lstm.cuda()
self.gate = self.gate.cuda()
self.sigmoid = self.sigmoid.cuda()
def get_sent_rep(self, sent, sent_length):
word_represent = self.wordrep(sent, sent_length)
packed_words = pack_padded_sequence(word_represent, sent_length, True)
hidden = None
lstm_out, hidden = self.sent_lstm(packed_words, hidden)
lstm_out, _ = pad_packed_sequence(lstm_out)
feature_out_sent = self.droplstm_sent(lstm_out.transpose(1, 0))
return feature_out_sent
def forward(self, word_inputs, list_sent_words_tensor, word_seq_lengths):
"""
input:
word_inputs: (batch_size, sent_len)
feature_inputs: [(batch_size, sent_len), ...] list of variables
word_seq_lengths: list of batch_size, (batch_size,1)
output:
Variable(batch_size, sent_len, hidden_dim)
"""
# paragraph-level
word_represent = self.wordrep(word_inputs, word_seq_lengths) # [batch_size, seq_len, embed_size]
packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
hidden = None
lstm_out, hidden = self.lstm(packed_words, hidden)
lstm_out, _ = pad_packed_sequence(lstm_out) # [seq_len, batch_size, hidden_size]
feature_out = self.droplstm(lstm_out.transpose(1, 0)) # [batch_size, seq_len, hidden_size]
# sentence-level reps
feature_out_sents = torch.zeros(
(feature_out.size()[0], feature_out.size()[1], feature_out.size()[2]),
requires_grad=False
).float()
if self.gpu:
feature_out_sents = feature_out_sents.cuda()
for idx, seq in enumerate(list_sent_words_tensor):
feature_out_seq = []
for sent in seq:
feature_out_sent = self.get_sent_rep(sent, np.array([len(sent[0])]))
feature_out_seq.append(feature_out_sent.squeeze(0))
feature_out_seq = torch.cat(feature_out_seq, 0)
if self.gpu:
                feature_out_seq = feature_out_seq.cuda()  # assign back: Tensor.cuda() is not in-place
feature_out_sents[idx][:len(feature_out_seq)][:] = feature_out_seq
gamma = self.sigmoid(self.gate(torch.cat((feature_out, feature_out_sents), 2)))
outputs_final = self.hidden2tag(gamma * feature_out + (1 - gamma) * feature_out_sents)
        return outputs_final

# ---| end of file: doc_event/model/wordsequence.py | package: zzsn-nlp |---
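# --- Editor's sketch for doc_event/model/wordsequence.py above (illustrative only) ---
# Paragraph-level and sentence-level LSTM features are fused through a learned gate:
#   gamma = sigmoid(W [h_para ; h_sent]),   h = gamma * h_para + (1 - gamma) * h_sent
# A tiny stand-alone version with random tensors; the hidden size is an arbitrary assumption.
import torch
import torch.nn as nn

hidden_dim, batch_size, seq_len = 8, 2, 3
gate = nn.Linear(hidden_dim * 2, hidden_dim)
h_para = torch.randn(batch_size, seq_len, hidden_dim)         # stand-in for feature_out
h_sent = torch.randn(batch_size, seq_len, hidden_dim)         # stand-in for feature_out_sents
gamma = torch.sigmoid(gate(torch.cat((h_para, h_sent), dim=2)))
fused = gamma * h_para + (1 - gamma) * h_sent
print(fused.shape)                                            # torch.Size([2, 3, 8])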
from __future__ import print_function
from __future__ import absolute_import
import os
import torch
import torch.nn as nn
import numpy as np
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM, WordpieceTokenizer
seed_num = 42
torch.manual_seed(seed_num)
np.random.seed(seed_num)
torch.cuda.manual_seed_all(seed_num)
torch.backends.cudnn.deterministic = True
class WordRep(nn.Module):
def __init__(self, data):
super(WordRep, self).__init__()
print('build word representation...')
self.gpu = data.HP_gpu
self.batch_size = data.HP_batch_size
self.embedding_dim = data.word_emb_dim
self.drop = nn.Dropout(data.HP_dropout)
self.word_embedding = nn.Embedding(data.word_alphabet.size(), self.embedding_dim)
if data.pretrain_word_embedding is not None:
self.word_embedding.weight.data.copy_(torch.from_numpy(data.pretrain_word_embedding))
else:
self.word_embedding.weight.data.copy_(
torch.from_numpy(self.random_embedding(data.word_alphabet.size(), self.embedding_dim)))
# bert feature
self.word_alphabet = data.word_alphabet
self._use_bert = data.use_bert
self._bert_dir = data.bert_dir
if self._use_bert:
# Load pre-trained model (weights)
self.bert_model = BertModel.from_pretrained(self._bert_dir)
self.bert_model.eval()
# Load pre-trained model tokenizer (vocabulary)
self.tokenizer = BertTokenizer.from_pretrained(self._bert_dir)
self.wpiecetokenizer = WordpieceTokenizer(self.tokenizer.vocab)
self.vocab = self._read_vocab(path=self._bert_dir)
if self.gpu:
self.drop = self.drop.cuda()
self.word_embedding = self.word_embedding.cuda()
if self._use_bert:
self.bert_model = self.bert_model.cuda()
def random_embedding(self, vocab_size, embedding_dim):
pretrain_emb = np.empty([vocab_size, embedding_dim])
scale = np.sqrt(3.0 / embedding_dim)
for index in range(vocab_size):
pretrain_emb[index, :] = np.random.uniform(-scale, scale, [1, embedding_dim])
return pretrain_emb
def _read_vocab(self, path):
result_vocab = list()
vocab_path = os.path.join(path, 'vocab.txt')
with open(vocab_path, 'r') as f:
vocab = f.readlines()
for v in vocab:
result_vocab.append(v.strip())
return result_vocab
pass
def _is_vocab(self, token):
if token in self.vocab:
return False
return True
pass
def bert_fea(self, ids_batch):
tokens_tensor_batch = []
context_tokens_uncased_batch = []
for ids in ids_batch:
context_tokens_uncased = []
for i in ids:
token = self.word_alphabet.get_instance(i)
if token == '</unk>' or not token or self._is_vocab(token):
context_tokens_uncased.append('[UNK]')
elif token == '<PAD>':
context_tokens_uncased.append('[PAD]')
else:
context_tokens_uncased.append(token)
context_tokens_uncased_batch.append(context_tokens_uncased)
# Tokenized input
# Convert token to vocabulary indices
indexed_tokens = self.tokenizer.convert_tokens_to_ids(context_tokens_uncased)
tokens_tensor_batch.append(indexed_tokens)
        # convert the whole batch of token ids to a tensor
tokens_tensor_batch = torch.tensor(tokens_tensor_batch)
if self.gpu:
tokens_tensor_batch = tokens_tensor_batch.to('cuda')
# Predict hidden states features for each layer
with torch.no_grad():
encoded_layers, _ = self.bert_model(tokens_tensor_batch)
        # average the hidden states over all returned encoder layers (for each token)
        # batch_size * doc_len * 768 (bert hidden size)
        avg = sum(encoded_layers) / len(encoded_layers)
        # keep the full averaged sequence; [CLS] is not handled separately here
        context_bert_feature_batch = avg[:, :, :]
return context_bert_feature_batch
def forward(self, word_inputs, word_seq_lengths):
"""
input:
word_inputs: (batch_size, sent_len)
word_seq_lengths: list of batch_size, (batch_size,1)
output:
Variable(batch_size, sent_len, hidden_dim)
"""
word_embs = self.word_embedding(word_inputs)
word_list = [word_embs]
if self._use_bert:
context_bert_feature_batch = self.bert_fea(word_inputs)
word_list.append(context_bert_feature_batch)
word_embs = torch.cat(word_list, 2)
word_represent = self.drop(word_embs)
        return word_represent

# ---| end of file: doc_event/model/wordrep.py | package: zzsn-nlp |---
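# --- Editor's sketch for doc_event/model/wordrep.py above (illustrative only) ---
# random_embedding draws each row uniformly from [-sqrt(3/dim), +sqrt(3/dim)], and
# bert_fea averages the hidden states of all returned encoder layers.  Both ideas in
# isolation, with made-up sizes:
import numpy as np

vocab_size, embedding_dim = 5, 4
scale = np.sqrt(3.0 / embedding_dim)
table = np.random.uniform(-scale, scale, (vocab_size, embedding_dim))
print(table.shape)                                        # (5, 4)

layers = [np.random.randn(2, 7, 768) for _ in range(4)]   # 4 fake layers: batch=2, doc_len=7
avg = sum(layers) / len(layers)                           # element-wise mean over layers
print(avg.shape)                                          # (2, 7, 768)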
from __future__ import print_function
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
START_TAG = -2
STOP_TAG = -1
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec, m_size):
"""
calculate log of exp sum
args:
vec (batch_size, vanishing_dim, hidden_dim) : input tensor
m_size : hidden_dim
return:
batch_size, hidden_dim
"""
_, idx = torch.max(vec, 1) # B * 1 * M
max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size) # B * M
return max_score.view(-1, m_size) + torch.log(torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1,
m_size) # B * M
class CRF(nn.Module):
def __init__(self, tagset_size, gpu):
super(CRF, self).__init__()
print('build CRF...')
self.gpu = gpu
# Matrix of transition parameters. Entry i,j is the score of transitioning from i to j.
self.tagset_size = tagset_size
# # We add 2 here, because of START_TAG and STOP_TAG
# # transitions (f_tag_size, t_tag_size), transition value from f_tag to t_tag
init_transitions = torch.zeros(self.tagset_size + 2, self.tagset_size + 2)
init_transitions[:, START_TAG] = -10000.0
init_transitions[STOP_TAG, :] = -10000.0
init_transitions[:, 0] = -10000.0
init_transitions[0, :] = -10000.0
if self.gpu:
init_transitions = init_transitions.cuda()
self.transitions = nn.Parameter(init_transitions)
# self.transitions = nn.Parameter(torch.Tensor(self.tagset_size+2, self.tagset_size+2))
# self.transitions.data.zero_()
def _calculate_PZ(self, feats, mask):
"""
input:
feats: (batch, seq_len, self.tag_size+2)
masks: (batch, seq_len)
"""
batch_size = feats.size(0)
seq_len = feats.size(1)
tag_size = feats.size(2)
# print feats.view(seq_len, tag_size)
assert (tag_size == self.tagset_size + 2)
mask = mask.transpose(1, 0).contiguous()
ins_num = seq_len * batch_size
# be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1)
feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
# need to consider start
scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
scores = scores.view(seq_len, batch_size, tag_size, tag_size)
# build iter
seq_iter = enumerate(scores)
_, inivalues = next(seq_iter) # bat_size * from_target_size * to_target_size
# only need start from start_tag
partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size, 1) # bat_size * to_target_size
# add start score (from start to all tag, duplicate to batch_size)
# partition = partition + self.transitions[START_TAG,:].view(1, tag_size, 1).expand(batch_size, tag_size, 1)
# iter over last scores
for idx, cur_values in seq_iter:
# previous to_target is current from_target
# partition: previous results log(exp(from_target)), #(batch_size * from_target)
# cur_values: bat_size * from_target * to_target
cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size,
tag_size)
cur_partition = log_sum_exp(cur_values, tag_size)
# print cur_partition.data
# (bat_size * from_target * to_target) -> (bat_size * to_target)
# partition = utils.switch(partition, cur_partition, mask[idx].view(bat_size, 1).expand(bat_size, self.tagset_size)).view(bat_size, -1)
mask_idx = mask[idx, :].view(batch_size, 1).expand(batch_size, tag_size)
# effective updated partition part, only keep the partition value of mask value = 1
masked_cur_partition = cur_partition.masked_select(mask_idx)
# let mask_idx broadcastable, to disable warning
mask_idx = mask_idx.contiguous().view(batch_size, tag_size, 1)
# replace the partition where the maskvalue=1, other partition value keeps the same
partition.masked_scatter_(mask_idx, masked_cur_partition)
# until the last state, add transition score for all partition (and do log_sum_exp) then select the value in STOP_TAG
cur_values = self.transitions.view(1, tag_size, tag_size).expand(batch_size, tag_size,
tag_size) + partition.contiguous().view(
batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
cur_partition = log_sum_exp(cur_values, tag_size)
final_partition = cur_partition[:, STOP_TAG]
return final_partition.sum(), scores
def _viterbi_decode(self, feats, mask):
"""
input:
feats: (batch, seq_len, self.tag_size+2)
mask: (batch, seq_len)
output:
decode_idx: (batch, seq_len) decoded sequence
                path_score: (batch, 1) corresponding score for each sequence (to be implemented)
"""
batch_size = feats.size(0)
seq_len = feats.size(1)
tag_size = feats.size(2)
assert (tag_size == self.tagset_size + 2)
# calculate sentence length for each sentence
length_mask = torch.sum(mask.long(), dim=1).view(batch_size, 1).long()
# mask to (seq_len, batch_size)
mask = mask.transpose(1, 0).contiguous()
ins_num = seq_len * batch_size
# be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1)
feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
# need to consider start
scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
scores = scores.view(seq_len, batch_size, tag_size, tag_size)
# build iter
seq_iter = enumerate(scores)
# record the position of best score
back_points = list()
partition_history = list()
# reverse mask (bug for mask = 1- mask, use this as alternative choice)
# mask = 1 + (-1)*mask
mask = (1 - mask.long()).bool()
_, inivalues = next(seq_iter) # bat_size * from_target_size * to_target_size
# only need start from start_tag
partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size) # bat_size * to_target_size
# print "init part:",partition.size()
partition_history.append(partition)
# iter over last scores
for idx, cur_values in seq_iter:
# previous to_target is current from_target
# partition: previous results log(exp(from_target)), #(batch_size * from_target)
# cur_values: batch_size * from_target * to_target
cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size,
tag_size)
# forscores, cur_bp = torch.max(cur_values[:,:-2,:], 1) # do not consider START_TAG/STOP_TAG
# print "cur value:", cur_values.size()
partition, cur_bp = torch.max(cur_values, 1)
# print "partsize:",partition.size()
# exit(0)
# print partition
# print cur_bp
# print "one best, ",idx
partition_history.append(partition)
# cur_bp: (batch_size, tag_size) max source score position in current tag
# set padded label as 0, which will be filtered in post processing
cur_bp.masked_fill_(mask[idx].view(batch_size, 1).expand(batch_size, tag_size), 0)
back_points.append(cur_bp)
# exit(0)
# add score to final STOP_TAG
partition_history = torch.cat(partition_history, 0).view(seq_len, batch_size, -1).transpose(1,
0).contiguous() ## (batch_size, seq_len. tag_size)
        # get the last position for each sentence, and select the last partitions using gather()
last_position = length_mask.view(batch_size, 1, 1).expand(batch_size, 1, tag_size) - 1
last_partition = torch.gather(partition_history, 1, last_position).view(batch_size, tag_size, 1)
# calculate the score from last partition to end state (and then select the STOP_TAG from it)
last_values = last_partition.expand(batch_size, tag_size, tag_size) + self.transitions.view(1, tag_size,
tag_size).expand(
batch_size, tag_size, tag_size)
_, last_bp = torch.max(last_values, 1)
pad_zero = autograd.Variable(torch.zeros(batch_size, tag_size)).long()
if self.gpu:
pad_zero = pad_zero.cuda()
back_points.append(pad_zero)
back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size)
# select end ids in STOP_TAG
pointer = last_bp[:, STOP_TAG]
insert_last = pointer.contiguous().view(batch_size, 1, 1).expand(batch_size, 1, tag_size)
back_points = back_points.transpose(1, 0).contiguous()
# move the end ids(expand to tag_size) to the corresponding position of back_points to replace the 0 values
# print "lp:",last_position
# print "il:",insert_last
back_points.scatter_(1, last_position, insert_last)
# print "bp:",back_points
# exit(0)
back_points = back_points.transpose(1, 0).contiguous()
# decode from the end, padded position ids are 0, which will be filtered if following evaluation
decode_idx = autograd.Variable(torch.LongTensor(seq_len, batch_size))
if self.gpu:
decode_idx = decode_idx.cuda()
decode_idx[-1] = pointer.detach()
for idx in range(len(back_points) - 2, -1, -1):
pointer = torch.gather(back_points[idx], 1, pointer.contiguous().view(batch_size, 1))
decode_idx[idx] = pointer.detach().view(batch_size)
path_score = None
decode_idx = decode_idx.transpose(1, 0)
return path_score, decode_idx
def forward(self, feats):
path_score, best_path = self._viterbi_decode(feats)
return path_score, best_path
def _score_sentence(self, scores, mask, tags):
"""
input:
scores: variable (seq_len, batch, tag_size, tag_size)
mask: (batch, seq_len)
tags: tensor (batch, seq_len)
output:
score: sum of score for gold sequences within whole batch
"""
# Gives the score of a provided tag sequence
batch_size = scores.size(1)
seq_len = scores.size(0)
tag_size = scores.size(2)
# convert tag value into a new format, recorded label bigram information to index
new_tags = autograd.Variable(torch.LongTensor(batch_size, seq_len))
if self.gpu:
new_tags = new_tags.cuda()
for idx in range(seq_len):
if idx == 0:
# start -> first score
new_tags[:, 0] = (tag_size - 2) * tag_size + tags[:, 0]
else:
new_tags[:, idx] = tags[:, idx - 1] * tag_size + tags[:, idx]
# transition for label to STOP_TAG
end_transition = self.transitions[:, STOP_TAG].contiguous().view(1, tag_size).expand(batch_size, tag_size)
# length for batch, last word position = length - 1
length_mask = torch.sum(mask.long(), dim=1).view(batch_size, 1).long()
# index the label id of last word
end_ids = torch.gather(tags, 1, length_mask - 1)
# index the transition score for end_id to STOP_TAG
end_energy = torch.gather(end_transition, 1, end_ids)
# convert tag as (seq_len, batch_size, 1)
new_tags = new_tags.transpose(1, 0).contiguous().view(seq_len, batch_size, 1)
# need convert tags id to search from 400 positions of scores
tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view(seq_len,
batch_size) # seq_len * bat_size
# mask transpose to (seq_len, batch_size)
tg_energy = tg_energy.masked_select(mask.transpose(1, 0))
# ## calculate the score from START_TAG to first label
# start_transition = self.transitions[START_TAG,:].view(1, tag_size).expand(batch_size, tag_size)
# start_energy = torch.gather(start_transition, 1, tags[0,:])
# add all score together
# gold_score = start_energy.sum() + tg_energy.sum() + end_energy.sum()
gold_score = tg_energy.sum() + end_energy.sum()
return gold_score
def neg_log_likelihood_loss(self, feats, mask, tags):
        # negative log likelihood
batch_size = feats.size(0)
forward_score, scores = self._calculate_PZ(feats, mask)
gold_score = self._score_sentence(scores, mask, tags)
# print "batch, f:", forward_score.data[0], " g:", gold_score.data[0], " dis:", forward_score.data[0] - gold_score.data[0]
# exit(0)
return forward_score - gold_score
def _viterbi_decode_nbest(self, feats, mask, nbest):
"""
input:
feats: (batch, seq_len, self.tag_size+2)
mask: (batch, seq_len)
output:
decode_idx: (batch, nbest, seq_len) decoded sequence
                path_score: (batch, nbest) corresponding score for each sequence (to be implemented)
nbest decode for sentence with one token is not well supported, to be optimized
"""
batch_size = feats.size(0)
seq_len = feats.size(1)
tag_size = feats.size(2)
assert (tag_size == self.tagset_size + 2)
# calculate sentence length for each sentence
length_mask = torch.sum(mask.long(), dim=1).view(batch_size, 1).long()
# mask to (seq_len, batch_size)
mask = mask.transpose(1, 0).contiguous()
ins_num = seq_len * batch_size
# be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1)
feats = feats.transpose(1, 0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
# need to consider start
scores = feats + self.transitions.view(1, tag_size, tag_size).expand(ins_num, tag_size, tag_size)
scores = scores.view(seq_len, batch_size, tag_size, tag_size)
# build iter
seq_iter = enumerate(scores)
# record the position of best score
back_points = list()
partition_history = list()
# reverse mask (bug for mask = 1- mask, use this as alternative choice)
# mask = 1 + (-1)*mask
mask = (1 - mask.long()).bool()
_, inivalues = next(seq_iter) # bat_size * from_target_size * to_target_size
# only need start from start_tag
partition = inivalues[:, START_TAG, :].clone() # bat_size * to_target_size
# initial partition [batch_size, tag_size]
partition_history.append(partition.view(batch_size, tag_size, 1).expand(batch_size, tag_size, nbest))
# iter over last scores
for idx, cur_values in seq_iter:
if idx == 1:
cur_values = cur_values.view(batch_size, tag_size, tag_size) + partition.contiguous().view(batch_size,
tag_size,
1).expand(
batch_size, tag_size, tag_size)
else:
# previous to_target is current from_target
# partition: previous results log(exp(from_target)), #(batch_size * nbest * from_target)
# cur_values: batch_size * from_target * to_target
cur_values = cur_values.view(batch_size, tag_size, 1, tag_size).expand(batch_size, tag_size, nbest,
tag_size) + partition.contiguous().view(
batch_size, tag_size, nbest, 1).expand(batch_size, tag_size, nbest, tag_size)
# compare all nbest and all from target
cur_values = cur_values.view(batch_size, tag_size * nbest, tag_size)
# print "cur size:",cur_values.size()
partition, cur_bp = torch.topk(cur_values, nbest, 1)
# cur_bp/partition: [batch_size, nbest, tag_size], id should be normize through nbest in following backtrace step
# print partition[:,0,:]
# print cur_bp[:,0,:]
# print "nbest, ",idx
if idx == 1:
cur_bp = cur_bp * nbest
partition = partition.transpose(2, 1)
cur_bp = cur_bp.transpose(2, 1)
# print partition
# exit(0)
# partition: (batch_size * to_target * nbest)
# cur_bp: (batch_size * to_target * nbest) Notice the cur_bp number is the whole position of tag_size*nbest, need to convert when decode
partition_history.append(partition)
# cur_bp: (batch_size,nbest, tag_size) topn source score position in current tag
# set padded label as 0, which will be filtered in post processing
# mask[idx] ? mask[idx-1]
cur_bp.masked_fill_(mask[idx].view(batch_size, 1, 1).expand(batch_size, tag_size, nbest), 0)
# print cur_bp[0]
back_points.append(cur_bp)
# add score to final STOP_TAG
partition_history = torch.cat(partition_history, 0).view(seq_len, batch_size, tag_size, nbest).transpose(1,
0).contiguous() ## (batch_size, seq_len, nbest, tag_size)
        # get the last position for each sentence, and select the last partitions using gather()
last_position = length_mask.view(batch_size, 1, 1, 1).expand(batch_size, 1, tag_size, nbest) - 1
last_partition = torch.gather(partition_history, 1, last_position).view(batch_size, tag_size, nbest, 1)
# calculate the score from last partition to end state (and then select the STOP_TAG from it)
last_values = last_partition.expand(batch_size, tag_size, nbest, tag_size) + self.transitions.view(1, tag_size,
1,
tag_size).expand(
batch_size, tag_size, nbest, tag_size)
last_values = last_values.view(batch_size, tag_size * nbest, tag_size)
end_partition, end_bp = torch.topk(last_values, nbest, 1)
# end_partition: (batch, nbest, tag_size)
end_bp = end_bp.transpose(2, 1)
# end_bp: (batch, tag_size, nbest)
pad_zero = autograd.Variable(torch.zeros(batch_size, tag_size, nbest)).long()
if self.gpu:
pad_zero = pad_zero.cuda()
back_points.append(pad_zero)
back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size, nbest)
# select end ids in STOP_TAG
pointer = end_bp[:, STOP_TAG, :] ## (batch_size, nbest)
insert_last = pointer.contiguous().view(batch_size, 1, 1, nbest).expand(batch_size, 1, tag_size, nbest)
back_points = back_points.transpose(1, 0).contiguous()
# move the end ids(expand to tag_size) to the corresponding position of back_points to replace the 0 values
# print "lp:",last_position
# print "il:",insert_last[0]
# exit(0)
# copy the ids of last position:insert_last to back_points, though the last_position index
# last_position includes the length of batch sentences
# print "old:", back_points[9,0,:,:]
back_points.scatter_(1, last_position, insert_last)
# back_points: [batch_size, seq_length, tag_size, nbest]
# print "new:", back_points[9,0,:,:]
# exit(0)
# print pointer[2]
'''
back_points: in simple demonstratration
x,x,x,x,x,x,x,x,x,7
x,x,x,x,x,4,0,0,0,0
x,x,6,0,0,0,0,0,0,0
'''
back_points = back_points.transpose(1, 0).contiguous()
# print back_points[0]
# back_points: (seq_len, batch, tag_size, nbest)
# decode from the end, padded position ids are 0, which will be filtered in following evaluation
decode_idx = autograd.Variable(torch.LongTensor(seq_len, batch_size, nbest))
if self.gpu:
decode_idx = decode_idx.cuda()
        decode_idx[-1] = pointer.data // nbest  # integer division: recover the tag id from the tag*nbest index
# print "pointer-1:",pointer[2]
# exit(0)
# use old mask, let 0 means has token
for idx in range(len(back_points) - 2, -1, -1):
# print "pointer: ",idx, pointer[3]
# print "back:",back_points[idx][3]
# print "mask:",mask[idx+1,3]
new_pointer = torch.gather(back_points[idx].view(batch_size, tag_size * nbest), 1,
pointer.contiguous().view(batch_size, nbest))
            decode_idx[idx] = new_pointer.data // nbest
# # use new pointer to remember the last end nbest ids for non longest
pointer = new_pointer + pointer.contiguous().view(batch_size, nbest) * mask[idx].view(batch_size, 1).expand(
batch_size, nbest).long()
# exit(0)
path_score = None
decode_idx = decode_idx.transpose(1, 0)
# decode_idx: [batch, seq_len, nbest]
# print decode_idx[:,:,0]
# print "nbest:",nbest
# print "diff:", decode_idx[:,:,0]- decode_idx[:,:,4]
# print decode_idx[:,0,:]
# exit(0)
# calculate probability for each sequence
scores = end_partition[:, :, STOP_TAG]
# scores: [batch_size, nbest]
max_scores, _ = torch.max(scores, 1)
minus_scores = scores - max_scores.view(batch_size, 1).expand(batch_size, nbest)
path_score = F.softmax(minus_scores, 1)
# path_score: [batch_size, nbest]
# exit(0)
        return path_score, decode_idx

# ---| end of file: doc_event/model/crf.py | package: zzsn-nlp |---
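# --- Editor's sketch for doc_event/model/crf.py above (illustrative only) ---
# log_sum_exp subtracts the per-column maximum before exponentiating (the standard
# numerical-stability trick); it should agree with torch.logsumexp over dim=1.
import torch

def log_sum_exp(vec, m_size):                              # copy of the helper defined above
    _, idx = torch.max(vec, 1)
    max_score = torch.gather(vec, 1, idx.view(-1, 1, m_size)).view(-1, 1, m_size)
    return max_score.view(-1, m_size) + torch.log(
        torch.sum(torch.exp(vec - max_score.expand_as(vec)), 1)).view(-1, m_size)

vec = torch.randn(2, 5, 5) * 50                            # large magnitudes to stress stability
print(torch.allclose(log_sum_exp(vec, 5), torch.logsumexp(vec, dim=1)))   # True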
import os
from typing import Union
from doc_similarity.model.cosine_similarity import CosineSimilarity
from doc_similarity.model.jaccard import JaccardSimilarity
from doc_similarity.model.levenshtein import LevenshteinSimilarity
from doc_similarity.model.min_hash import MinHashSimilarity
from doc_similarity.model.sim_hash import SimHashSimilarity, OldSimHashSimilarity
# from doc_similarity.model.similarity_tx import Similarity
# from doc_similarity.model.total_sim import TotalSimilarity
root_path = '/home/zzsn/liuyan/word2vec/doc_similarity'
stop_words_path = os.path.join(root_path, 'stop_words.txt')
cos_sim = CosineSimilarity(stop_words_path=stop_words_path)
jac_sim = JaccardSimilarity(stop_words_path=stop_words_path)
lev_sim = LevenshteinSimilarity(stop_words_path=stop_words_path)
min_hash_sim = MinHashSimilarity(stop_words_path=stop_words_path)
sim_hash_sim = SimHashSimilarity(stop_words_path=stop_words_path)
old_sim_hash_sim = OldSimHashSimilarity()
# ctt_sim = Similarity(
# model_path=os.path.join(root_path, 'Tencent_AILab_ChineseEmbedding_Min.txt'),
# stopword_path=os.path.join(root_path, 'stopwords.txt')
# )
# total_sim = TotalSimilarity(root_path=root_path)
sim_dict = {
'cos_sim': cos_sim,
'jac_sim': jac_sim,
'lev_sim': lev_sim,
'min_hash': min_hash_sim,
'sim_hash': old_sim_hash_sim,
# 'ctt_sim': ctt_sim,
'false': False
}
# def compare_all(total_list: list) -> list:
# result_list = []
# total_len = len(total_list)
# for index_x in range(total_len):
# article_x = total_list[index_x]
# for index_y in range(index_x + 1, total_len):
# article_y = total_list[index_y]
# result_dict_title = total_sim.calculate(article_x['title'], article_y['title'])
# result_dict_content = total_sim.calculate(article_x['content'], article_y['content'])
# result_list.append([
# article_x['id'], article_y['id'],
# result_dict_title, result_dict_content
# ])
# return result_list
# pass
def compare_sim_name(title_sim_name: Union[str, bool], content_sim_name: Union[str, bool]) -> Union[dict, list]:
if title_sim_name in sim_dict:
title_sim = sim_dict[title_sim_name]
else:
return {
'handleMsg': '所选标题相似度算法名称错误或不存在!请核查(cos_sim / jac_sim / lev_sim / min_hash / sim_hash / false)',
'isHandleSuccess': False,
'logs': None,
'resultData': None
}
if content_sim_name in sim_dict:
content_sim = sim_dict[content_sim_name]
else:
return {
'handleMsg': '所选正文相似度算法名称错误或不存在!请核查(cos_sim / jac_sim / lev_sim / min_hash / sim_hash / false)',
'isHandleSuccess': False,
'logs': None,
'resultData': None
}
return [title_sim, content_sim]
def compare_single(article_list: list, title_sim_name: Union[str, bool], content_sim_name: Union[str, bool]) -> dict:
judge_sim_name = compare_sim_name(title_sim_name=title_sim_name, content_sim_name=content_sim_name)
if type(judge_sim_name) is dict:
return judge_sim_name
else:
title_sim, content_sim = judge_sim_name[0], judge_sim_name[1]
if len(article_list) == 2:
article_x, article_y = article_list[0], article_list[1]
title_similarity = title_sim.calculate(
article_x['title'], article_y['title']
) if title_sim else 0.0
content_similarity = content_sim.calculate(
article_x['content'], article_y['content']
) if content_sim else 0.0
result_dict = {
'id_x': article_x['id'],
'id_y': article_y['id'],
'title_sim': title_similarity,
'content_sim': content_similarity
}
else:
return {
'handleMsg': '所对比文章数量不是 2 篇,请核查!',
'isHandleSuccess': False,
'logs': None,
'resultData': None
}
return {
'handleMsg': 'success',
'isHandleSuccess': True,
'logs': None,
'resultData': result_dict
}
pass
def compare_many(article_list: list, title_sim_name: Union[str, bool], content_sim_name: Union[str, bool]) -> dict:
judge_sim_name = compare_sim_name(title_sim_name=title_sim_name, content_sim_name=content_sim_name)
if type(judge_sim_name) is dict:
return judge_sim_name
else:
title_sim, content_sim = judge_sim_name[0], judge_sim_name[1]
result_list = []
total_len = len(article_list)
if total_len < 3:
return {
'handleMsg': '所对比文章数量少于 3 篇,请核查!(2 篇文章对比请使用接口1 similarity)',
'isHandleSuccess': False,
'logs': None,
'resultData': None
}
else:
for index in range(total_len):
article = article_list[index]
article_list[index]['title_transform'] = title_sim.transform(article['title']) if title_sim else None
article_list[index]['content_transform'] = content_sim.transform(
article['content']) if content_sim else None
for index_x in range(total_len):
article_x = article_list[index_x]
for index_y in range(index_x + 1, total_len):
article_y = article_list[index_y]
title_similarity = title_sim.calculate_transform(
article_x['title_transform'], article_y['title_transform']
) if title_sim else 0.0
content_similarity = content_sim.calculate_transform(
article_x['content_transform'], article_y['content_transform']
) if content_sim else 0.0
result_list.append({
'id_x': article_x['id'],
'id_y': article_y['id'],
'title_sim': title_similarity,
'content_sim': content_similarity
})
return {
'handleMsg': 'success',
'isHandleSuccess': True,
'logs': None,
'resultData': {
'sim_list': result_list
}
}
pass
if __name__ == '__main__':
result_dict = compare_single([
{
'id': 1,
'title': 'I love YanLiu',
'content': 'YingLiang love YanLiu'
},
{
'id': 2,
'title': 'I love YingLiang',
'content': 'YanLiu love YingLiang'
}
], 'cos_sim', 'sim_hash')
print(result_dict)
result_list = compare_many([
{
'id': 1,
'title': 'I love YanLiu',
'content': 'YingLiang love YanLiu'
},
{
'id': 2,
'title': 'I love YingLiang',
'content': 'YanLiu love YingLiang'
},
{
'id': 3,
'title': 'I love YingLiang',
'content': 'YanLiu love YingLiang'
}
], 'lev_sim', 'sim_hash')
print(result_list)
    pass

# ---| end of file: doc_similarity/data/compare.py | package: zzsn-nlp |---
import xlrd
import xlsxwriter
def xlsx2list(xlsx_path: str) -> list:
wb = xlrd.open_workbook(xlsx_path)
sh = wb.sheet_by_name('Sheet1')
total_list = list()
for i in range(sh.nrows):
if i < 3:
continue
row = sh.row_values(i)
total_list.append({
'id': int(row[0]),
'title': row[1].replace('\n', '').replace('\r', '').replace('\t', ''),
'content': row[2].replace('\n', '').replace('\r', '').replace('\t', '')
})
# row = sh.row_values(i)
# total_list.append({
# 'id': i,
# 'title': row[0].replace('\n', '').replace('\r', '').replace('\t', ''),
# 'content': row[1].replace('\n', '').replace('\r', '').replace('\t', '')
# })
return total_list
def list2xlsx(xlsx_path=None, result_lists=None):
workbook = xlsxwriter.Workbook(xlsx_path)
worksheet = workbook.add_worksheet('result')
worksheet.write_row(
0, 0, [
'content_id_x', 'content_id_y',
'cos_sim', 'jac_sim', 'lev_sim',
'min_hash', 'old_sim_hash', 'new_sim_hash',
'ctt_sim',
'cos_sim', 'jac_sim', 'lev_sim',
'min_hash', 'old_sim_hash', 'new_sim_hash',
'ctt_sim',
]
)
for index, result in enumerate(result_lists):
worksheet.write_row(
index + 1, 0, [
result[0],
result[1],
result[2]['result_cos_sim'],
result[2]['result_jac_sim'],
result[2]['result_lev_sim'],
result[2]['result_min_hash_sim'],
result[2]['result_old_sim_hash_sim'],
result[2]['result_new_sim_hash_sim'],
result[2]['result_sim_tx'],
result[3]['result_cos_sim'],
result[3]['result_jac_sim'],
result[3]['result_lev_sim'],
result[3]['result_min_hash_sim'],
result[3]['result_old_sim_hash_sim'],
result[3]['result_new_sim_hash_sim'],
result[3]['result_sim_tx'],
]
)
workbook.close()
if __name__ == '__main__':
xlsx_path = '../data/total_datasets.xlsx'
total_list = xlsx2list(xlsx_path=xlsx_path)
    pass

# ---| end of file: doc_similarity/data/data_process.py | package: zzsn-nlp |---
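# --- Editor's usage sketch for doc_similarity/data/data_process.py above (illustrative only) ---
# list2xlsx expects rows shaped [id_x, id_y, title_metrics, content_metrics], where each
# metrics dict holds one value per algorithm.  The import path and the fabricated numbers
# below are assumptions for illustration.
from doc_similarity.data.data_process import list2xlsx

fake_metrics = {
    'result_cos_sim': 0.91, 'result_jac_sim': 0.55, 'result_lev_sim': 0.63,
    'result_min_hash_sim': 0.48, 'result_old_sim_hash_sim': 0.87,
    'result_new_sim_hash_sim': 12, 'result_sim_tx': 0.79,
}
list2xlsx(xlsx_path='demo_result.xlsx', result_lists=[[1, 2, fake_metrics, fake_metrics]])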
import re
import math
from simhash import Simhash
from doc_similarity.model.base_similarity import BaseSimilarity
from doc_similarity.utils.tool import Tool
class OldSimHashSimilarity(BaseSimilarity):
def __init__(self):
super(OldSimHashSimilarity, self).__init__()
@staticmethod
def _filter_html(html):
"""
        :param html: html string
        :return: plain text with the HTML tags stripped
"""
dr = re.compile(r'<[^>]+>', re.S)
dd = dr.sub('', html).strip()
return dd
    def calculate(self, text_1: str, text_2: str):  # similarity between two documents
        """
        :param text_1: first text
        :param text_2: second text
        :return: similarity score of the two documents
        """
simhash_1 = Simhash(text_1)
simhash_2 = Simhash(text_2)
# print(len(bin(simhash_1.value)), (len(bin(simhash_2.value))))
max_hash_bit = max(len(bin(simhash_1.value)), (len(bin(simhash_2.value))))
# print(max_hash_bit)
        # Hamming distance
hamming_distance = simhash_1.distance(simhash_2)
# print(hamming_distance)
similarity = 1 - hamming_distance / max_hash_bit
return similarity
def transform(self, content: str) -> object:
simhash = Simhash(content)
return simhash
pass
def calculate_transform(self, transform_x: Simhash, transform_y: Simhash) -> float:
"""
:param transform_x: simhash_1
:param transform_y: simhash_2
:return:
"""
max_hash_bit = max(len(bin(transform_x.value)), (len(bin(transform_y.value))))
hamming_distance = transform_x.distance(transform_y)
similarity = 1 - hamming_distance / max_hash_bit
return similarity
pass
class SimHashSimilarity(object):
"""
SimHash
对单词数量低于500的文章误差较大。
"""
def __init__(self, stop_words_path: str):
self._tool = Tool(stop_words_path=stop_words_path)
pass
@staticmethod
    def get_bin_str(source):  # hash a string into a 64-bit binary string
if source == '':
return 0
else:
t = ord(source[0]) << 7
m = 1000003
mask = 2 ** 128 - 1
for c in source:
t = ((t * m) ^ ord(c)) & mask
t ^= len(source)
if t == -1:
t = -2
t = bin(t).replace('0b', '').zfill(64)[-64:]
return str(t)
def _run(self, keywords):
ret = []
for keyword, weight in keywords:
bin_str = self.get_bin_str(keyword)
key_list = []
for c in bin_str:
weight = math.ceil(weight)
if c == '1':
key_list.append(int(weight))
else:
key_list.append(-int(weight))
ret.append(key_list)
        # collapse the per-keyword signed-weight rows into a single fingerprint string
rows = len(ret)
cols = len(ret[0])
result = []
for i in range(cols):
tmp = 0
for j in range(rows):
tmp += int(ret[j][i])
if tmp > 0:
tmp = '1'
elif tmp <= 0:
tmp = '0'
result.append(tmp)
return ''.join(result)
def calculate(self, content_x: str, content_y: str):
        # extract weighted keywords from both texts
s1 = self._tool.extract_keyword(content_x, withWeigth=True)
s2 = self._tool.extract_keyword(content_y, withWeigth=True)
sim_hash_1 = self._run(s1)
sim_hash_2 = self._run(s2)
# print(f'相似哈希指纹1: {sim_hash1}\n相似哈希指纹2: {sim_hash2}')
length = 0
for index, char in enumerate(sim_hash_1):
if char == sim_hash_2[index]:
continue
else:
length += 1
        return length  # note: this variant returns the raw Hamming-style distance (smaller = more similar)

# ---| end of file: doc_similarity/model/sim_hash.py | package: zzsn-nlp |---
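# --- Editor's usage sketch for doc_similarity/model/sim_hash.py above (illustrative only) ---
# OldSimHashSimilarity wraps the `simhash` package: similarity = 1 - hamming_distance / bits.
# The snippet below uses the library directly (assumes `pip install simhash`); the texts
# are arbitrary examples.
from simhash import Simhash

a = Simhash('the quick brown fox jumps over the lazy dog')
b = Simhash('the quick brown fox jumped over a lazy dog')
bits = max(len(bin(a.value)), len(bin(b.value)))
print(1 - a.distance(b) / bits)        # close to 1.0 for near-duplicate texts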
import numpy as np
import gensim
import jieba
import re
from sklearn.metrics.pairwise import cosine_similarity
class Similarity(object):
def __init__(self, model_path, stopword_path):
self.Word2VecModel = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=False)
self.vocab_list = [word for word, vocab in self.Word2VecModel.wv.vocab.items()]
self.stopword_path = stopword_path
def stop_word_list(self, path):
        '''
        Build the stop-word list from a file.
        :param path: path to the stop-word file
        :return: list of stop words
        '''
stopwords = [line.strip() for line in open(path, 'r', encoding='utf-8').readlines()]
return stopwords
def remove_char(self, text):
        '''
        Keep only Chinese characters, English letters, digits and punctuation.
        :param text: raw text
        :return: filtered text
        '''
graph_filter = re.compile(r'[^\u4e00-\u9fa5a-zA-Z0-9\s,。\.,?\?!!;;]')
graph = graph_filter.sub('', text)
if len(graph) == 0:
return ''
else:
return graph
def preprocess(self, text):
        '''
        Preprocess the text: strip unwanted characters, tokenize with jieba, drop stop words.
        :param text: raw text
        :return: iterator of tokens
        '''
if isinstance(text, str):
text = self.remove_char(text)
textcut = jieba.cut(text.strip())
stopwords = self.stop_word_list(self.stopword_path)
            textcut = filter(lambda x: x not in stopwords, textcut)  # drop stop words
else:
raise TypeError('text should be str')
return textcut
    # cutWords: segmented words of one article; word vectors are looked up in the loaded word2vec model
def getVector_v4(self, cutWords):
article_vector = np.zeros((1, 200))
for cutWord in cutWords:
if cutWord in self.vocab_list:
article_vector += np.array(self.Word2VecModel.wv[cutWord])
cutWord_vector = article_vector.mean(axis=0)
return cutWord_vector
def calculation_sim(self, text1, text2):
        '''
        Compute the cosine similarity between two texts.
        :param text1: first text
        :param text2: second text
        :return: similarity score
        '''
text1 = self.preprocess(text1)
text2 = self.preprocess(text2)
matrix_text1 = self.getVector_v4(text1)
matrix_text2 = self.getVector_v4(text2)
dis = cosine_similarity(matrix_text1.reshape(1, -1), matrix_text2.reshape(1, -1))
        return dis

# ---| end of file: doc_similarity/model/similarity_tx.py | package: zzsn-nlp |---
from sklearn.metrics.pairwise import cosine_similarity
from doc_similarity.model.base_similarity import BaseSimilarity
from doc_similarity.utils.tool import Tool
class CosineSimilarity(BaseSimilarity):
"""
余弦相似度
"""
def __init__(self, stop_words_path):
super(CosineSimilarity, self).__init__()
self._tool = Tool(stop_words_path=stop_words_path)
@staticmethod
    def one_hot(word_dict, keywords):  # bag-of-words count encoding over the shared vocabulary
# cut_code = [word_dict[word] for word in keywords]
cut_code = [0] * len(word_dict)
for word in keywords:
cut_code[word_dict[word]] += 1
return cut_code
def calculate(self, content_x, content_y):
        keywords_1 = self._tool.extract_keyword(content_x, withWeigth=False)  # extract keywords
keywords_2 = self._tool.extract_keyword(content_y, withWeigth=False)
        # union of the two keyword sets
        union = set(keywords_1).union(set(keywords_2))
        # assign an index to each word
word_dict = {}
i = 0
for word in union:
word_dict[word] = i
i += 1
        # count-vector encoding
        s1_cut_code = self.one_hot(word_dict, keywords_1)
        s2_cut_code = self.one_hot(word_dict, keywords_2)
        # cosine similarity
        sample = [s1_cut_code, s2_cut_code]
        # guard against empty input / division by zero
try:
sim = cosine_similarity(sample)
return sim[1][0]
except Exception as e:
print(e)
return 0.0
def transform(self, content: str) -> object:
        keywords = self._tool.extract_keyword(content, withWeigth=False)  # extract keywords
return keywords
pass
def calculate_transform(self, transform_x: object, transform_y: object) -> float:
"""
:param transform_x: keywords_1
:param transform_y: keywords_2
:return: float
"""
        # union of the two keyword sets
        union = set(transform_x).union(set(transform_y))
        # assign an index to each word
word_dict = {}
i = 0
for word in union:
word_dict[word] = i
i += 1
        # count-vector encoding
        s1_cut_code = self.one_hot(word_dict, transform_x)
        s2_cut_code = self.one_hot(word_dict, transform_y)
        # cosine similarity
        sample = [s1_cut_code, s2_cut_code]
        # guard against empty input / division by zero
try:
sim = cosine_similarity(sample)
return sim[1][0]
except Exception as e:
print(e)
return 0.0
        pass

# ---| end of file: doc_similarity/model/cosine_similarity.py | package: zzsn-nlp |---
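# --- Editor's sketch for doc_similarity/model/cosine_similarity.py above (illustrative only) ---
# The class builds a shared vocabulary over two keyword lists, turns each list into a
# term-count vector, and feeds both to sklearn's cosine_similarity.  With pre-tokenized
# keywords (skipping the jieba/TF-IDF step hidden in Tool) the core reduces to:
from sklearn.metrics.pairwise import cosine_similarity

keywords_1 = ['market', 'risk', 'bank', 'risk']
keywords_2 = ['market', 'bank', 'policy']
word_dict = {w: i for i, w in enumerate(set(keywords_1) | set(keywords_2))}

def one_hot(word_dict, keywords):                       # same counting scheme as the class
    code = [0] * len(word_dict)
    for w in keywords:
        code[word_dict[w]] += 1
    return code

sim = cosine_similarity([one_hot(word_dict, keywords_1), one_hot(word_dict, keywords_2)])
print(sim[1][0])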
import os
import shutil
import ftplib
from ftplib import FTP
from flask import Flask, request, url_for, send_from_directory
from werkzeug.utils import secure_filename
from julei.kmeans import Kmeans
HOST = '127.0.0.1'
DEBUG = False
PORT = 8010
ALLOWED_EXTENSIONS = set(['xls', 'xlsx'])
app = Flask(__name__)
# cap upload size at 100 MB
app.config['MAX_CONTENT_LENGTH'] = 100 * 1024 * 1024
html = '''
<!DOCTYPE html>
<title>文件传输</title>
<h2>文件传输</h2>
<form method='post' enctype='multipart/form-data'>
<input type='file' name='file' multiple="multiple">
<input type='submit' value='传输该文件'>
</form>
'''
htmls = '''
<!DOCTYPE html>
<title>文件传输</title>
<h2>文件传输</h2>
<form method='post' enctype='multipart/form-data'>
<input type='submit' value='开始传输'>
</form>
'''
# connect to and log in to the FTP server
def loginFTP():
ftp = FTP()
    ftp.connect('192.168.1.196', 21)  # FTP server IP and port
    ftp.login('', '')  # user name and password; empty strings mean anonymous login
return ftp,True
# check whether the file extension is allowed
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[-1] in ALLOWED_EXTENSIONS
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
@app.route('/download', methods=['GET', 'POST'])
def upload_file():
if request.method == 'POST':
ftp, status = loginFTP()
if status == True:
ftp.cwd('./beida')
files = request.files.getlist('file')
# files = request.files['file']
print(files)
for file in files:
if allowed_file(file.filename):
print(file)
filename = secure_filename(file.filename)
ftp.storbinary('STOR ' + filename, file, blocksize=1024)
else:
return html + '文件类型不匹配'
return html + str(len(files)) + '个文件已经传输成功!'
else:
return html + '连接失败'
return html
@app.route('/upload', methods=['GET', 'POST'])
# pull every allowed file in the remote FTP directory into the local ./data folder
def ftpDownload():
if request.method == 'POST':
ftp, status = loginFTP()
remote_path = './'
local_path = './data'
if not os.path.exists(local_path):
os.makedirs(local_path)
ftp.cwd(remote_path)
# print(ftp.dir())
for file in ftp.nlst():
print(file)
if allowed_file(file):
local_file = os.path.join(local_path, file)
# print(file.rsplit('.', 1)[-1])
# print(allowed_file(file))
download_file(ftp=ftp, remote_file=file, local_file=local_file)
else:
print('文件类型有误')
ftp.quit()
return htmls +'传输成功'
return htmls
def download_file(ftp, remote_file, local_file):
try:
buf_size = 1024
file_handler = open(local_file, 'wb')
ftp.retrbinary('RETR ' + remote_file, file_handler.write, buf_size)
file_handler.close()
except Exception as err:
print('传输文件出错,出现异常:%s ' % err)
@app.route('/write/', methods=('GET', 'POST'))
def get_train():
try:
km = Kmeans()
km.write()
except Exception as err:
        print('出现异常:' + str(err))
return 'lose'
return '<h2>模型训练成功,相关文件已保存<h2>'
@app.route('/delete/', methods=('GET', 'POST'))
def delete_dir():
print('当前工作目录为' + os.getcwd())
for root,dir,files in os.walk('./data'):
print('data文件夹中包含' + str(files))
for file in files:
if file.rsplit('.')[-1] == 'xlsx':
os.remove('./data/' + file)
if os.path.exists('./result'):
shutil.rmtree('./result/')
return '<h2>删除文件成功</h2>'
app.run(host=HOST, port=PORT, debug=DEBUG)

# ---| end of file: julei/app.py | package: zzsnML |---
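# --- Editor's client sketch for julei/app.py above (illustrative only) ---
# The service exposes /download (push .xls/.xlsx files to the FTP server), /upload (pull
# them from FTP into ./data), /write/ (train the clustering) and /delete/ (clean up).
# A minimal client with `requests`; it assumes the service and its FTP target are reachable
# and that a local file named articles.xlsx exists.
import requests

base = 'http://127.0.0.1:8010'
with open('articles.xlsx', 'rb') as f:
    print(requests.post(base + '/download', files={'file': f}).text)
print(requests.post(base + '/upload').text)    # fetch files from FTP into ./data
print(requests.get(base + '/write/').text)     # run K-means and write the result excels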
import time
import os
import pickle
import numpy as np
import gensim
from julei.tfidf import Tfidf
from julei.word2vec_train import Word2vec
class Representation:
def __init__(self):
pass
def make_dir(self):
if os.path.isdir('result/representation/') == False:
os.makedirs(r'result/representation/')
def load_pkl(self):
with open('result/tfidf/tfidf.pkl','rb') as load1:
tfidf = pickle.load(load1)
with open('result/tfidf/vocabulary_tfidf.pkl','rb') as load2:
vocabulary = pickle.load(load2)
return tfidf,vocabulary
def load_embedding(self):
print(time.strftime('%Y-%m-%d %H:%M:%S'),'开始导入腾讯公开中文词向量(200维)')
file_path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(file_path, 'data/Tencent_AILab_ChineseEmbedding.txt')
model_tencent = gensim.models.KeyedVectors.load_word2vec_format(path, binary=False)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成导入腾讯公开中文词向量(200维)')
vocabulary_tencent = model_tencent.wv.vocab.keys()
print(time.strftime('%Y-%m-%d %H:%M:%S'),'开始导入当前数据训练中文词向量(100维)')
word2vector = Word2vec()
model_w2v = word2vector.make_model()
# model_w2v = gensim.models.Word2Vec.load('result/word2vec/wordvector_model')
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成导入当前数据训练中文词向量(100维)')
vocabulary_w2v = model_w2v.wv.vocab.keys()
return model_tencent,vocabulary_tencent,model_w2v,vocabulary_w2v
def count_embedding(self):
tfidf1 = Tfidf()
tfidf,vocabulary = tfidf1.count_tfidf()
# tfidf,vocabulary = self.load_pkl()
model_tencent,vocabulary_tencent,model_w2v,vocabulary_w2v = self.load_embedding()
num_data = tfidf.shape[0]
V = tfidf.shape[1]
vector_matrix = np.zeros((V,300))
count = 0
for word in vocabulary:
if word in vocabulary_tencent:
vector_tencent = model_tencent.wv.word_vec(word)
else:
vector_tencent = np.random.randn(200)
if word in vocabulary_w2v:
vector_w2v = model_w2v.wv.word_vec(word)
else:
vector_w2v = np.random.randn(100)
vector = np.concatenate((vector_tencent,vector_w2v))
vector_matrix[count] = vector
count += 1
if (count+1) % 10000 == 0:
print(time.strftime('%Y-%m-%d %H:%M:%S'),'第',count,'个词向量计算完毕')
print(time.strftime('%Y-%m-%d %H:%M:%S'),'第',count,'个词向量计算完毕')
self.make_dir()
with open('result/representation/vector_matrix.pkl', 'wb') as save1:
pickle.dump(vector_matrix, save1, protocol=4)
return num_data,vector_matrix
def text_represent(self):
num_data,vector_matrix = self.count_embedding()
print(num_data)
tfidf,vocabulary = self.load_pkl()
text_representation = np.zeros((num_data,300))
for i in range(num_data):
tmp = tfidf[i].toarray()
weighted_average_vector = np.dot(tmp,vector_matrix)
text_representation[i] = weighted_average_vector
if (i+1)%10000 == 0 or (i+1) == num_data:
print(time.strftime('%Y-%m-%d %H:%M:%S'),'第',i+1,'条文本表示计算完毕')
with open('result/representation/text_representation.pkl','wb') as save2:
pickle.dump(text_representation,save2,protocol=4)
print(num_data)
return text_representation
# rep = Representation()
# rep.text_represent()

# ---| end of file: julei/representation.py | package: zzsnML |---
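# --- Editor's sketch for julei/representation.py above (illustrative only) ---
# Each document vector is the TF-IDF-weighted sum of its word vectors:
#   doc_vec (1 x 300) = tfidf_row (1 x V) @ vector_matrix (V x 300)
# A toy version with a dense row; the real code gets the row from a scipy sparse matrix
# via .toarray().  Sizes are made up.
import numpy as np

V, dim = 6, 300
vector_matrix = np.random.randn(V, dim)                    # one 300-d vector per vocabulary word
tfidf_row = np.array([[0.0, 0.3, 0.0, 0.5, 0.2, 0.0]])     # TF-IDF weights of one document
doc_vec = np.dot(tfidf_row, vector_matrix)
print(doc_vec.shape)                                       # (1, 300)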
import time
import os
import re
import pickle
import xlrd
import collections
from pyhanlp import JClass
class Segment:
def __init__(self):
pass
def make_dir(self):
if os.path.isdir('result/segment/') == False:
            os.makedirs(r'result/segment/')  # create the output folder for segmentation results
    # read the content from an excel file (sheet format: date, time, content)
def load_data(self,excel_path):
excel = xlrd.open_workbook(excel_path)
table = excel.sheet_by_index(0)
num_rows = table.nrows-1
content = []
for idx in range(1,num_rows+1):
row = table.row_values(idx)
content.append(row[2])
return content
def data_segment(self):
        file_names = sorted(os.listdir('data/'))  # list the names of all raw excel files under data/
original_data = collections.defaultdict(list)
for file_name in file_names:
if file_name[-5:] == '.xlsx':
excel_paths = 'data/' + file_name
content = self.load_data(excel_paths)
print(time.strftime('%Y-%m-%d %H:%M:%S'),file_name.split('_')[0],'文本读取完毕')
                original_data[file_name.split('_')[0]] = content  # raw data keyed by the date string
self.make_dir()
        data_segment_txt = open('result/segment/data_segment.txt', 'wb')  # write the segmented text to a txt file for later word2vec training
vocabulary_segment = collections.defaultdict(int)
find_chinese = re.compile(u"[\u4e00-\u9fa5]+")
symbols = "[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%\t\n\r\f\b\000\v]"
PerceptronLexicalAnalyzer = JClass('com.hankcs.hanlp.model.perceptron.PerceptronLexicalAnalyzer')
segment = PerceptronLexicalAnalyzer()
for key in original_data.keys():
content = original_data[key]
for i in range(len(content)):
words = list(segment.analyze(content[i]).toWordArray())
for word in words:
if re.findall(find_chinese,word) == []:
continue
elif re.sub(symbols, "",re.findall(find_chinese,word)[0]) == '':
continue
elif len(re.sub(symbols, "",re.findall(find_chinese,word)[0])) == 1:
continue
else:
word_filtrated = re.sub(symbols, "",re.findall(find_chinese,word)[0])
vocabulary_segment[word_filtrated] += 1
data_segment_txt.write(word_filtrated.encode('utf-8'))
data_segment_txt.write(' '.encode('utf-8'))
data_segment_txt.write('\n'.encode('utf-8'))
if (i+1)%100 == 0 or i+1 == len(content):
print(time.strftime('%Y-%m-%d %H:%M:%S'),key,'第',i+1,'条文本分词完毕并写入')
data_segment_txt.close()
return vocabulary_segment
def dump_pkl(self):
vocabulary_segment = self.data_segment()
with open('result/segment/vocabulary_segment.pkl','wb') as save1:
pickle.dump(vocabulary_segment,save1)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'词表长度:',len(vocabulary_segment))
return vocabulary_segment
def write(self):
vocabulary_segment = self.data_segment()
        vocabulary_segment_sorted = sorted(vocabulary_segment.items(), key=lambda item: item[1], reverse=True)  # sort words by frequency, descending
vocabulary_segment_txt = open('result/segment/vocabulary_segment.txt','wb')
for value in vocabulary_segment_sorted:
vocabulary_segment_txt.write(value[0].encode('utf-8'))
vocabulary_segment_txt.write(' '.encode('utf-8'))
vocabulary_segment_txt.write(str(value[1]).encode('utf-8'))
vocabulary_segment_txt.write('\n'.encode('utf-8'))
vocabulary_segment_txt.close()
# se = Segment()
# se.write()

# ---| end of file: julei/segment.py | package: zzsnML |---
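# --- Editor's sketch for julei/segment.py above (illustrative only) ---
# A token is kept only if it contains a Chinese substring, survives the symbol filter,
# and is longer than one character.  The same two regexes applied in isolation:
import re

find_chinese = re.compile(u"[\u4e00-\u9fa5]+")
symbols = "[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%\t\n\r\f\b\000\v]"

for token in ['2021年', '经济', 'GDP', '的']:
    chinese = re.findall(find_chinese, token)
    if not chinese:
        continue                                  # no Chinese characters at all (e.g. 'GDP')
    word = re.sub(symbols, '', chinese[0])
    if len(word) > 1:
        print(word)                               # only '经济' survives this filter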
import time
import xlrd
import os
import math
import pickle
import numpy as np
from openpyxl import Workbook
from sklearn.cluster import KMeans
from julei.representation import Representation
class Kmeans:
def __init__(self):
pass
def make_dir(self, path):
dir_path = os.path.join(os.getcwd(), path)
if os.path.isdir(dir_path) == False:
os.makedirs(dir_path)
def load_pkl(self):
print(time.strftime('%Y-%m-%d %H:%M:%S'), '开始导入数据')
representation = Representation()
data = representation.text_represent()
# with open('result/representation/text_representation.pkl','rb') as load1:
# data = pickle.load(load1)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成导入数据')
num_data = data.shape[0]
print("====================num_data = " + str(num_data))
return data,num_data
def train(self, path ='result/kmeans/'):
data,num_data = self.load_pkl()
#num_class = 20
num_class = int(math.sqrt(num_data))
# print(num_class)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'开始训练模型')
kmeans = KMeans(n_clusters=num_class, init='k-means++', n_init=5, max_iter=100)
model = kmeans.fit(data)
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成训练模型')
classes = model.labels_
centroids = model.cluster_centers_
result = [[] for j in range(num_class)]
data_cluster = [[] for j in range(num_class)]
for i in range(num_data):
for j in range(num_class):
if classes[i] == j:
result[j].append(i)
data_cluster[j].append(data[i])
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成计算结果')
result_sorted = []
similarity = []
for j in range(num_class):
distances = [(np.linalg.norm(centroids[j] - data_cluster[j][i]),result[j][i]) for i in range(len(result[j]))]
distances_sorted = sorted(distances, key=lambda x: x[0])
result_sorted.append([value[1] for value in distances_sorted])
similarity.append([value[0] for value in distances_sorted])
print(time.strftime('%Y-%m-%d %H:%M:%S'),'完成排序结果')
with open(os.path.join(os.getcwd(), path)+'centroids.pkl','wb') as save1:
pickle.dump(centroids,save1)
return num_class,result_sorted,similarity
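    # Rough usage sketch (added for illustration only; it assumes the data/ layout and
    # the result/kmeans/ directory that write() below prepares):
    # km = Kmeans()
    # num_class, result_sorted, similarity = km.train(path='result/kmeans/')
    # for j, cluster in enumerate(result_sorted):
    #     print(j + 1, len(cluster))  # cluster sizes; documents are sorted by distance to the centroid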
def load_data(self, excel_path):
excel = xlrd.open_workbook(excel_path)
table = excel.sheet_by_index(0)
num_rows = table.nrows-1
content = []
for idx in range(1,num_rows+1):
row = table.row_values(idx)
            content.append([row[0], row[1], row[2], row[3], row[4], row[5], row[6]])  # three extra columns (source/title/link) used in write() below
return content
def write(self, path ='result/kmeans/'):
self.make_dir(path)
file_names = sorted(os.listdir('data/'))
original_data = []
for file_name in file_names:
if file_name[-5:] == '.xlsx':
content = self.load_data(excel_path='data/'+file_name)
print(time.strftime('%Y-%m-%d %H:%M:%S'),file_name.split('_')[0],'文本读取完毕')
original_data += content
print(time.strftime('%Y-%m-%d %H:%M:%S'),'开始写入结果')
num_class,result_sorted,similarity = self.train(path)
for j in range(num_class):
print(time.strftime('%Y-%m-%d %H:%M:%S'),'第',j+1,'类有',len(result_sorted[j]),'条文本')
workbook = Workbook()
worksheet = workbook.active
worksheet.title = str(len(result_sorted[j]))
worksheet.cell(row=1,column=1).value = '日期'
worksheet.cell(row=1,column=2).value = '时间'
worksheet.cell(row=1,column=3).value = '距离中心欧氏距离'
worksheet.cell(row=1,column=4).value = '内容'
worksheet.cell(row=1, column=5).value = '来源' #新加
worksheet.cell(row=1, column=6).value = '标题' # 新加
worksheet.cell(row=1, column=7).value = '链接' # 新加
count = 1
for i in range(len(result_sorted[j])):
try:
worksheet.cell(row=count+1,column=5).value = original_data[result_sorted[j][i]][3].encode('gbk','ignore').decode('gbk','ignore')#新加
worksheet.cell(row=count + 1, column=6).value = original_data[result_sorted[j][i]][5].encode('gbk','ignore').decode( 'gbk', 'ignore') # 新加
worksheet.cell(row=count + 1, column=7).value = original_data[result_sorted[j][i]][6].encode('gbk',
'ignore').decode(
'gbk', 'ignore') # 新加
# print(original_data[result_sorted[j][i]][3])
worksheet.cell(row=count+1,column=4).value = original_data[result_sorted[j][i]][2].encode('gbk','ignore').decode('gbk', 'ignore')
worksheet.cell(row=count+1,column=1).value = original_data[result_sorted[j][i]][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet.cell(row=count+1,column=2).value = original_data[result_sorted[j][i]][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet.cell(row=count+1,column=3).value = similarity[j][i]
count += 1
except Exception as e:
print('str(e):\t\t', str(e))
continue
workbook.save(os.path.join(os.getcwd(), path)+str(count-1)+'_'+str(j+1)+'.xlsx')
print(time.strftime('%Y-%m-%d %H:%M:%S'),j+1,'类写入Excel完毕','\n')
# km = Kmeans()
# km.write() | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/julei/kmeans.py | kmeans.py |
from urllib import request, error
import sys
import zipfile
import tarfile
import socket
socket.setdefaulttimeout(15)
def progressbar(cur):
percent = '{:.2%}'.format(cur)
sys.stdout.write('\r')
sys.stdout.write('[%-100s] %s' % ('=' * int(cur*100), percent))
sys.stdout.flush()
print(cur)
def schedule(blocknum,blocksize,totalsize):
'''
    blocknum: number of blocks downloaded so far
    blocksize: size of each transferred block
    totalsize: total size of the remote file
'''
percent = 0
if totalsize == 0:
percent = 0
elif totalsize == -1 and blocknum==0:
print('响应失败,正在重新连接……')
download()
elif totalsize == -1 and blocknum != 0:
pass
else:
percent = blocknum * blocksize / totalsize
progressbar(percent)
if percent > 1.0:
percent = 1.0
progressbar(percent)
# print('\n'+'download : %.2f%%' %(percent))
def download(url = 'https://codeload.github.com/chengtingting980903/zzsnML/tar.gz/1.0.0', path = '1.0.0.tar.gz'):
try:
filename,headers = request.urlretrieve(url, path, schedule)
print(headers)
except error.HTTPError as e:
print(e)
print(url + ' download failed!' + '\r\n')
print('请手动下载:%s' %url)
except error.URLError as e:
print(url + ' download failed!' + '\r\n')
print('请手动下载:%s' %url)
print(e)
except Exception as e:
print(e)
print('请手动下载:%s' %url)
else:
print('\r\n' + url + ' download successfully!')
return filename
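# Hedged usage example (added): download a different archive directly. The URL and
# file name below are placeholders, not resources this project guarantees to exist.
# archive = download(url='https://example.com/some-release.tar.gz', path='some-release.tar.gz')
# print('saved to', archive)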
def unzip(path = '1.0.0.zip'):
zip_file = zipfile.ZipFile(path)
    zip_list = zip_file.namelist()  # list every file in the archive
    for f in zip_list:
        zip_file.extract(f)  # extract each file (to the current working directory)
    zip_file.close()  # must close the archive to release the file handle
def untar(path = '1.0.0.tar.gz'):
tar = tarfile.open(path)
tar.extractall()
tar.close()
def download_decompress(url = 'https://codeload.github.com/chengtingting980903/zzsnML/tar.gz/1.0.0', path = '1.0.0.tar.gz'):
filename = download(url, path)
try:
if str(filename).split('.')[-1] == 'zip':
print('开始解压zip文件,请等待……')
unzip()
print('解压完成,可以使用')
except Exception as e:
print(e)
print('解压失败,请手动解压')
try:
if str(filename).split('.')[-1] == 'gz':
print('开始解压tar.gz文件,请等待……')
untar()
print('解压完成,可以使用')
except Exception as e:
print(e)
print('解压失败,请手动解压')
# if __name__ == '__main__':
print('开始下载:https://codeload.github.com/chengtingting980903/zzsnML/tar.gz/1.0.0')
download_decompress()
print('开始下载:https://github.com/chengtingting980903/zzsnML/releases/download/1.0.0/data.zip')
download_decompress(url='https://github.com/chengtingting980903/zzsnML/releases/download/1.0.0/data.zip', path='data.zip') | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/download_data/download.py | download.py |
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
from werkzeug.utils import secure_filename
import time
import os, sys
# sys.path.append('./app/SVM/')
from sentiment_analysis.svm_app import predict_one
from sentiment_analysis.SVM.svm import svm
import warnings
warnings.filterwarnings('ignore')
DEBUG = False
PORT = 8008
HOST = '0.0.0.0'
app = Flask(__name__)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.secret_key = 'skfasmknfdhflm-vkllsbzdfmkqo3ooishdhzo295949mfw,fk'
# APP_ROOT = os.path.abspath('.')
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
## This function is not for sentiment analysis
# @app.route('/api/', methods=('GET', 'POST'))
# def get_result():
# # title = request.args.get('title', '')
# # content = request.args.get('content', '')
# # company = request.args.get('company', '')
# # if title == '' and content == '':
# # return '-2'
# # _content = title + '。' + content
# # # print(_content)
# # relevant = test(_content, company)
# # return relevant
# file_path = request.args.get('file_path', None)
# _all = request.args.get('_all', True)
# prefix = request.args.get('prefix', './')
# if file_path is None:
# return '必须给定输入文件!'
# if type(_all) == str:
# _all = _all.lower()
# if _all == 'false':
# _all = False
# elif _all == 'true':
# _all = True
# else:
# return '_all参数错误,只能取值True或者False。'
# print(file_path, _all, prefix)
# result_file = main(file_path, _all=_all, prefix=prefix)
# return result_file
@app.route('/api2/', methods=('GET', 'POST'))
def get_single_result():
title = request.form['title']
content = request.form['content']
if title == '' and content == '':
return '-1'
_content = title + '。' + content
# print(_content)
sentiment = predict_one(_content)
return sentiment
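# Illustrative client call for /api2/ (added; host/port follow the defaults above, adjust as needed):
# import requests
# r = requests.post('http://127.0.0.1:8008/api2/', data={'title': '...', 'content': '...'})
# print(r.text)  # sentiment label returned by predict_one()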
@app.route('/train/', methods=('GET', 'POST'))
def begin_train():
connection_string = request.form['connection_string']
from_date = request.form['from_date']
to_date = request.form['to_date']
try:
if (connection_string is None) and (from_date is None) and (to_date is None):
print(r'正在使用默认参数训练模型,connection_string为cis/cis_zzsn9988@114.116.91.1:1521/orcl, from_date为2017-06-01, to_date为2017-06-15')
svm.train()
elif (connection_string == '') and (from_date == '') and (to_date == ''):
print(r'正在使用默认参数训练模型,connection_string为cis/cis_zzsn9988@114.116.91.1:1521/orcl, from_date为2017-06-01, to_date为2017-06-15')
svm.train()
else:
print(r'正在使用指定参数训练模型,connection_string为%s, from_date为%s, to_date为%s' %(connection_string, from_date, to_date))
svm.train(connection_string, from_date, to_date)
except Exception as e:
return 'train fail'
else:
return 'train success'
# if __name__ == '__main__':
app.run(debug=DEBUG, host=HOST, port=PORT) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/app.py | app.py |
from svm import *
import sys
sys.path.append('../../utils')
from augmentation_utils import *
'''
Currently, data augmentation makes the results worse; a better augmentation method should be proposed.
'''
connection_string = 'cis/cis_zzsn9988@118.190.174.96:1521/orcl'
from_date = '2017-06-01'
to_date = '2017-08-03'
wordvec_file = '../../data/news.ten.zh.text.vector'
stopwords_file = '../../data/stop_words.txt'
data_file = '../../data/%s~%s.pkl' % (from_date, to_date)
if os.path.exists(data_file):
data_cut, y = load_data_from_pickle(data_file)
else:
# connect database
ora_conn = cx_Oracle.connect(connection_string)
# fetch data
data, y = fetch_data_from_oracle(ora_conn, from_date, to_date, label_map={'负': 0, '非负': 1})
# cut data
data_cut = segment(data)
save_data([data_cut, y], data_file)
# doc2vec: average vector
print('Loading word vectors...')
model = load_wordvec(wordvec_file, binary=False)
stopwords = load_stopwords(stopwords_file, encoding='utf-8')
(X_cut_train, y_train), (X_cut_test, y_test) = train_test_split(data_cut, y)
X_train = buildVecs(X_cut_train, stopwords, model)
X_test = buildVecs(X_cut_test, stopwords, model)
N_train = len(y_train)
# load emotional dictionary
emotional_dict_file = '../../data/情感词典18级_1.pkl'
emotional_dict = load_emotion_dict(emotional_dict_file)
# data augmentation
X_aug, y_aug = avgvector_virtue_complementary_augmentation(X_cut_train, y_train, model, emotional_dict, \
num_aug=10000, neg_aug_ratio=0.1, ratio=[0.2, 0.6, 0.2], min_virtue_sent_len=100)
X_train_combine = np.concatenate((X_train, X_aug), axis=0)
y_train_combine = np.concatenate((y_train, y_aug))
idx = np.random.permutation(X_train_combine.shape[0])
reverse_idx = np.argsort(idx)
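# Note (added): argsort of a permutation yields its inverse, so reverse_idx maps the
# shuffled rows back to their original order; reverse_idx[:N_train] below therefore
# selects exactly the real (non-augmented) training samples when scoring.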
X_train_combine = X_train_combine[idx]
y_train_combine = y_train_combine[idx]
# perform pca
print('Performing PCA...')
n_components = 100
pca_ = pca(n_components=n_components)
pca_.fit(X_train_combine)
pca_.save('model/%s~%s_comple_aug.pca' % (from_date, to_date))
print('%s components can explain %.2f%% variance.' % (n_components, pca_.ratio_*100))
X_reduced_train = pca_.transform(X_train_combine)
X_reduced_test = pca_.transform(X_test)
# train svm
print('Training SVM...')
clf = svm(C=2, probability=True)
clf.fit(X_reduced_train, y_train_combine)
clf.save('model/%s~%s_comple_aug.svm' % (from_date, to_date))
# score
y_pred_prob_train = clf.predict_proba(X_reduced_train[reverse_idx[:N_train]])
y_pred_prob_test = clf.predict_proba(X_reduced_test)
y_pred_train = y_pred_prob_train[:,0] < y_pred_prob_train[:,1]
y_pred_test = y_pred_prob_test[:,0] < y_pred_prob_test[:,1]
y_pred_train = y_pred_train.astype(np.int32)
y_pred_test = y_pred_test.astype(np.int32)
train_score = compute_score(y_train_combine[reverse_idx[:N_train]], y_pred_train, classes=[0, 1])
test_score = compute_score(y_test, y_pred_test, classes=[0, 1])
print_score(train_score, 'Train score of SVM classifier(+aug+PCA)')
print_score(test_score, 'Test score of SVM classifier(+aug+PCA)') | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/SVM/svm_with_virtual_complementary_augmentation.py | svm_with_virtual_complementary_augmentation.py |
import sys
# sys.path.append('../../utils')
from sentiment_analysis.utils.utils import *
from sentiment_analysis.utils.word2vec_utils import *
from sklearn.svm import SVC
from sklearn.externals import joblib
import os
import cx_Oracle
class svm():
def __init__(self, label_dict=None, probability=True, C=5, kernel='rbf', degree=3, gamma='auto', coef0=0.0):
self.label_dict = label_dict
self.probability = probability
self.C = C
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self._svm = SVC(C=self.C, probability=self.probability, class_weight='balanced', kernel=self.kernel, \
degree=self.degree, gamma=self.gamma, coef0=self.coef0)
def fit(self, X, y):
self._svm.fit(X, y)
def predict(self, X, return_real_label=False):
if return_real_label:
assert self.label_dict is not None
return [self.label_dict[p] for p in self._svm.predict(X)]
return self._svm.predict(X)
def predict_proba(self, X):
if self.probability:
return self._svm.predict_proba(X)
else:
raise ValueError('If you want to get the predict probability, fit svm with probability=True.')
def save(self, save_to):
joblib.dump(self._svm, save_to)
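    # Minimal usage sketch (added): the wrapper mirrors sklearn's fit/predict API.
    # X_reduced_train / y_train etc. are placeholders for the PCA-reduced document
    # vectors built in train() below.
    # clf = svm(C=5, probability=True)
    # clf.fit(X_reduced_train, y_train)
    # probs = clf.predict_proba(X_reduced_test)
    # clf.save('model/example.svm')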
# if __name__ == '__main__':
def train(connection_string = 'cis/cis_zzsn9988@114.116.91.1:1521/orcl', from_date = '2017-06-01', to_date = '2017-08-03'):
# connection_string = 'cis/cis_zzsn9988@118.190.174.96:1521/orcl'
wordvec_file = './data/news.ten.zh.text.vector'
stopwords_file = './data/stop_words.txt'
data_file = './data/%s~%s.pkl' % (from_date, to_date)
if os.path.exists(data_file):
data_cut, y = load_data_from_pickle(data_file)
else:
# connect database
ora_conn = cx_Oracle.connect(connection_string)
# fetch data
data, y = fetch_data_from_oracle(ora_conn, from_date, to_date, label_map={'负': 0, '非负': 1})
# cut data
data_cut = segment(data)
save_data([data_cut, y], data_file)
# doc2vec: average vector
print('Loading word vectors...')
model = load_wordvec(wordvec_file, binary=False)
stopwords = load_stopwords(stopwords_file, encoding='utf-8')
X = buildVecs(data_cut, stopwords, model)
(X_train, y_train), (X_test, y_test) = train_test_split(X, y)
# perform pca
print('Performing PCA...')
dir_path = os.path.join(os.getcwd(),'./model')
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
n_components = 100
pca_ = pca(n_components=n_components)
pca_.fit(X_train)
pca_.save('model/%s~%s.pca' % (from_date, to_date))
print('%s components can explain %.2f%% variance.' % (n_components, pca_.ratio_*100))
X_reduced_train = pca_.transform(X_train)
X_reduced_test = pca_.transform(X_test)
# train svm
print('Training SVM...')
clf = svm(C=5, probability=True)
clf.fit(X_reduced_train, y_train)
clf.save('model/%s~%s.svm' % (from_date, to_date))
# score
y_pred_train = clf.predict(X_reduced_train)
y_pred_test = clf.predict(X_reduced_test)
train_score = compute_score(y_train, y_pred_train, classes=[0, 1])
test_score = compute_score(y_test, y_pred_test, classes=[0, 1])
print_score(train_score, 'Train score of SVM classifier(+PCA)')
print_score(test_score, 'Test score of SVM classifier(+PCA)') | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/SVM/svm.py | svm.py |
from svm import *
import sys
sys.path.append('../../utils')
from augmentation_utils import *
'''
Currently, data augmentation makes the results worse; a better augmentation method should be proposed.
'''
connection_string = 'cis/cis_zzsn9988@118.190.174.96:1521/orcl'
from_date = '2017-06-01'
to_date = '2017-08-03'
wordvec_file = '../../data/news.ten.zh.text.vector'
stopwords_file = '../../data/stop_words.txt'
data_file = '../../data/%s~%s.pkl' % (from_date, to_date)
if os.path.exists(data_file):
data_cut, y = load_data_from_pickle(data_file)
else:
# connect database
ora_conn = cx_Oracle.connect(connection_string)
# fetch data
data, y = fetch_data_from_oracle(ora_conn, from_date, to_date, label_map={'负': 0, '非负': 1})
# cut data
data_cut = segment(data)
save_data([data_cut, y], data_file)
# doc2vec: average vector
print('Loading word vectors...')
model = load_wordvec(wordvec_file, binary=False)
stopwords = load_stopwords(stopwords_file, encoding='utf-8')
(X_cut_train, y_train), (X_cut_test, y_test) = train_test_split(data_cut, y)
X_train = buildVecs(X_cut_train, stopwords, model)
X_test = buildVecs(X_cut_test, stopwords, model)
# load emotional dictionary
emotional_dict_file = '../../data/情感词典18级_1.pkl'
emotional_dict = load_emotion_dict(emotional_dict_file)
# data augmentation
X_aug, y_aug = avgvector_virtue_augmentation(X_cut_train, y_train, model, emotional_dict, \
num_aug=10000, neg_aug_ratio=0.1, ratio=[0.4, 0.6, 0.0], min_virtue_sent_len=100)
X_train_combine = np.concatenate((X_train, X_aug), axis=0)
y_train_combine = np.concatenate((y_train, y_aug))
idx = np.random.permutation(X_train_combine.shape[0])
reverse_idx = np.argsort(idx)
X_train_combine = X_train_combine[idx]
y_train_combine = y_train_combine[idx]
# perform pca
print('Performing PCA...')
n_components = 100
pca_ = pca(n_components=n_components)
pca_.fit(X_train_combine)
pca_.save('model/%s~%s_aug.pca' % (from_date, to_date))
print('%s components can explain %.2f%% variance.' % (n_components, pca_.ratio_*100))
X_reduced_train = pca_.transform(X_train_combine)
X_reduced_test = pca_.transform(X_test)
# train svm
print('Training SVM...')
clf = svm(C=2, probability=True)
clf.fit(X_reduced_train, y_train_combine)
clf.save('model/%s~%s_aug.svm' % (from_date, to_date))
# score
y_pred_train = clf.predict(X_reduced_train)
y_pred_test = clf.predict(X_reduced_test)
train_score = compute_score(y_train_combine, y_pred_train, classes=[0, 1])
test_score = compute_score(y_test, y_pred_test, classes=[0, 1])
print_score(train_score, 'Train score of SVM classifier(+aug+PCA)')
print_score(test_score, 'Test score of SVM classifier(+aug+PCA)') | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/SVM/svm_with_virtual_data_augmentation.py | svm_with_virtual_data_augmentation.py |
import sys
sys.path.append('../../utils')
from utils import *
from word2vec_utils import *
from sklearn.svm import SVC
import os
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
connection_string = 'cis/cis_zzsn9988@118.190.174.96:1521/orcl'
from_date = '2017-06-01'
to_date = '2017-08-03'
wordvec_file = '../../data/news.ten.zh.text.vector'
stopwords_file = '../../data/stop_words.txt'
data_file = '../../data/%s~%s.pkl' % (from_date, to_date)
if os.path.exists(data_file):
data_cut, y = load_data_from_pickle(data_file)
else:
# connect database
ora_conn = cx_Oracle.connect(connection_string)
# fetch data
data, y = fetch_data_from_oracle(ora_conn, from_date, to_date, label_map={'负': 0, '非负': 1})
# cut data
data_cut = segment(data)
save_data([data_cut, y], data_file)
# doc2vec: average vector
print('Loading word vectors...')
model = load_wordvec(wordvec_file, binary=False)
stopwords = load_stopwords(stopwords_file, encoding='utf-8')
X = buildVecs(data_cut, stopwords, model)
(X_train, y_train), (X_test, y_test) = train_test_split(X, y)
# perform pca
print('Performing PCA...')
n_components = 100
pca_ = pca(n_components=n_components)
pca_.fit(X_train)
# pca_.save('%s~%s.pca' % (from_date, to_date))
print('%s components can explain %.2f%% variance.' % (n_components, pca_.ratio_*100))
X_reduced_train = pca_.transform(X_train)
X_reduced_test = pca_.transform(X_test)
# train svm
# param_grid = [
# {'C': [1, 10, 100, 1000], 'kernel': ['linear'], 'class_weight': ['balanced', None], 'probability': [True]},
# {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001, 'auto'], 'kernel': ['rbf'], 'class_weight': ['balanced', None], 'probability': [True]},
# {'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001, 'auto'], 'kernel': ['sigmoid'], 'class_weight': ['balanced', None], 'probability': [True]},
# {'C': [1, 10, 100, 1000], 'degree': [2, 3, 4], 'kernel': ['poly'], 'class_weight': ['balanced', None], 'probability': [True]}
# ]
param_grid = [
{'C': [5, 10, 20, 30], 'kernel': ['linear'], 'class_weight': ['balanced', None]},
{'C': [5, 10, 20, 30], 'gamma': [0.001, 0.0001, 'auto'], 'kernel': ['rbf'], 'class_weight': ['balanced', None]},
{'C': [5, 10, 20, 30], 'gamma': [0.001, 0.0001, 'auto'], 'kernel': ['sigmoid'], 'class_weight': ['balanced', None]},
{'C': [5, 10, 20, 30], 'degree': [2, 3, 4], 'kernel': ['poly'], 'class_weight': ['balanced', None]}
]
print('Training SVM...')
scores = ['precision', 'recall', 'f1']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), param_grid, cv=5, n_jobs=5,
scoring='%s_macro' % score)
clf.fit(X_reduced_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_reduced_test)
print(classification_report(y_true, y_pred))
print() | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/SVM/svm_parameter_selection.py | svm_parameter_selection.py |
from tfidf_utils import Vocabulary
import numpy as np
import time
def avgvector_virtue_augmentation(cut_data, label, model, emotion_dict, num_aug=10000, neg_aug_ratio=0.8, \
ratio=[0.3, 0.5, 0.2], min_virtue_sent_len=10, max_virtue_sent_len=500):
'''ratio: [p1, p2, p3], p1: prob of words from related emotion dict; p2: prob of words from related vocab;
p3: prob of words from opposite vocab.
'''
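    # Worked example (added): with ratio=[0.3, 0.5, 0.2] and a virtual sentence of
    # length 100, the code below draws int(0.5*100)=50 words from the related vocab,
    # int(0.2*100)=20 from the opposite vocab, and the remaining 30 from the related
    # emotion dictionary (p1 is effectively the remainder).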
assert len(cut_data) == len(label)
assert min_virtue_sent_len <= max_virtue_sent_len and min_virtue_sent_len >= 1
signature = int(time.time())
neg_vocab = Vocabulary(signature=signature, name='negative')
pos_vocab = Vocabulary(signature=signature, name='positive')
vocab_dict = {0: neg_vocab, 1: pos_vocab} # label=0 stands for negative
for i, d in enumerate(cut_data):
vocab_dict[label[i]].update(d)
emotion_dict_ = {'neg': [], 'pos': []}
for word, strength in emotion_dict.items():
if strength <= 0:
emotion_dict_['neg'].append(word)
else:
emotion_dict_['pos'].append(word)
num_neg = max(int(num_aug * neg_aug_ratio), 1)
num_pos = max(num_aug - num_neg, 1)
aug_data = []
aug_label = []
neg_words = list(neg_vocab.voc.keys())
neg_words_prob = list(neg_vocab.voc.values())
neg_words_prob = np.array(neg_words_prob) / np.sum(neg_words_prob)
pos_words = list(pos_vocab.voc.keys())
pos_words_prob = list(pos_vocab.voc.values())
pos_words_prob = np.array(pos_words_prob) / np.sum(pos_words_prob)
sents_len = np.random.randint(min_virtue_sent_len, max_virtue_sent_len, size=num_neg)
for i in range(num_neg):
d_ = []
n_neg_related_vocab_words = int(ratio[1]*sents_len[i])
n_neg_opposite_vocab_words = int(ratio[2]*sents_len[i])
n_neg_emotion_words = sents_len[i] - n_neg_opposite_vocab_words - n_neg_related_vocab_words
# words from emotion dict
d_.extend(np.random.choice(emotion_dict_['neg'], replace=True, size=n_neg_emotion_words))
# words from related vocab
d_.extend(np.random.choice(neg_words, replace=True, p=neg_words_prob, size=n_neg_related_vocab_words))
# words from opposite vocab
d_.extend(np.random.choice(pos_words, replace=True, p=pos_words_prob, size=n_neg_opposite_vocab_words))
vec_ = 0
actual_len = 0.
for word in d_:
try:
vec_ = vec_ + model[word]
actual_len += 1
except KeyError:
continue
if actual_len > 0:
aug_data.append(vec_/actual_len)
aug_label.append(0)
sents_len = np.random.randint(min_virtue_sent_len, max_virtue_sent_len, size=num_pos)
for i in range(num_pos):
d_ = []
n_pos_related_vocab_words = int(ratio[1]*sents_len[i])
n_pos_opposite_vocab_words = int(ratio[2]*sents_len[i])
n_pos_emotion_words = sents_len[i] - n_pos_opposite_vocab_words - n_pos_related_vocab_words
# words from emotion dict
d_.extend(np.random.choice(emotion_dict_['pos'], replace=True, size=n_pos_emotion_words))
# words from related vocab
d_.extend(np.random.choice(pos_words, replace=True, p=pos_words_prob, size=n_pos_related_vocab_words))
# words from opposite vocab
d_.extend(np.random.choice(neg_words, replace=True, p=neg_words_prob, size=n_pos_opposite_vocab_words))
vec_ = 0
actual_len = 0.
for word in d_:
try:
vec_ = vec_ + model[word]
actual_len += 1
except KeyError:
continue
if actual_len > 0:
aug_data.append(vec_/actual_len)
            aug_label.append(1)  # positive samples: label 1 ('非负'), consistent with the label_map used when loading data
return aug_data, aug_label
def avgvector_virtue_complementary_augmentation(cut_data, label, model, emotion_dict, num_aug=10000, neg_aug_ratio=0.8, \
ratio=[0.3, 0.5, 0.2], min_virtue_sent_len=10, max_virtue_sent_len=500):
'''ratio: [p1, p2, p3], p1: prob of words from opposite emotion dict; p2: prob of words from opposite vocab;
p3: prob of words from related vocab.
'''
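    # Worked example (added): the proportions are interpreted the same way as in
    # avgvector_virtue_augmentation, but the mixture is deliberately contradictory
    # (opposite-polarity emotion words plus the opposite vocab dominate) and every
    # generated vector is labelled 2, i.e. an explicit "fake" class alongside 0/1.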
assert len(cut_data) == len(label)
assert min_virtue_sent_len <= max_virtue_sent_len and min_virtue_sent_len >= 1
signature = int(time.time())
neg_vocab = Vocabulary(signature=signature, name='negative')
pos_vocab = Vocabulary(signature=signature, name='positive')
vocab_dict = {0: neg_vocab, 1: pos_vocab} # label=0 stands for negative
for i, d in enumerate(cut_data):
vocab_dict[label[i]].update(d)
emotion_dict_ = {'neg': [], 'pos': []}
for word, strength in emotion_dict.items():
if strength <= 0:
emotion_dict_['neg'].append(word)
else:
emotion_dict_['pos'].append(word)
num_neg = max(int(num_aug * neg_aug_ratio), 1)
num_pos = max(num_aug - num_neg, 1)
aug_data = []
aug_label = []
neg_words = list(neg_vocab.voc.keys())
neg_words_prob = list(neg_vocab.voc.values())
neg_words_prob = np.array(neg_words_prob) / np.sum(neg_words_prob)
pos_words = list(pos_vocab.voc.keys())
pos_words_prob = list(pos_vocab.voc.values())
pos_words_prob = np.array(pos_words_prob) / np.sum(pos_words_prob)
sents_len = np.random.randint(min_virtue_sent_len, max_virtue_sent_len, size=num_neg)
for i in range(num_neg):
d_ = []
n_neg_opposite_vocab_words = int(ratio[1]*sents_len[i])
n_neg_related_vocab_words = int(ratio[2]*sents_len[i])
n_pos_emotion_words = sents_len[i] - n_neg_opposite_vocab_words - n_neg_related_vocab_words
# words from emotion dict
d_.extend(np.random.choice(emotion_dict_['pos'], replace=True, size=n_pos_emotion_words))
# words from related vocab
d_.extend(np.random.choice(neg_words, replace=True, p=neg_words_prob, size=n_neg_related_vocab_words))
# words from opposite vocab
d_.extend(np.random.choice(pos_words, replace=True, p=pos_words_prob, size=n_neg_opposite_vocab_words))
vec_ = 0
actual_len = 0.
for word in d_:
try:
vec_ = vec_ + model[word]
actual_len += 1
except KeyError:
continue
if actual_len > 0:
aug_data.append(vec_/actual_len)
aug_label.append(2) # new label: fake
sents_len = np.random.randint(min_virtue_sent_len, max_virtue_sent_len, size=num_pos)
for i in range(num_pos):
d_ = []
n_pos_opposite_vocab_words = int(ratio[1]*sents_len[i])
n_pos_related_vocab_words = int(ratio[2]*sents_len[i])
n_neg_emotion_words = sents_len[i] - n_pos_opposite_vocab_words - n_pos_related_vocab_words
# words from emotion dict
d_.extend(np.random.choice(emotion_dict_['neg'], replace=True, size=n_neg_emotion_words))
# words from related vocab
d_.extend(np.random.choice(pos_words, replace=True, p=pos_words_prob, size=n_pos_related_vocab_words))
# words from opposite vocab
d_.extend(np.random.choice(neg_words, replace=True, p=neg_words_prob, size=n_pos_opposite_vocab_words))
vec_ = 0
actual_len = 0.
for word in d_:
try:
vec_ = vec_ + model[word]
actual_len += 1
except KeyError:
continue
if actual_len > 0:
aug_data.append(vec_/actual_len)
aug_label.append(2) # new label: fake
return aug_data, aug_label | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/utils/augmentation_utils.py | augmentation_utils.py |
import pickle, os
from gensim.models import word2vec, KeyedVectors
import numpy as np
from sklearn.decomposition import PCA
from sklearn.externals import joblib
import jieba
import cx_Oracle
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
_backend = 'jieba'
try:
from jpype import *
startJVM(getDefaultJVMPath(), "-Djava.class.path=/home/hongjp/hanlp/hanlp-portable-1.3.4.jar:/home/hongjp/hanlp", "-Xms1g", "-Xmx1g") # 启动JVM,Linux需替换分号;为冒号:
HanLP = JClass('com.hankcs.hanlp.HanLP')
_backend = 'hanlp'
print('Using HanLP as Chinese sentence segmentation backend.')
except Exception as e:
print('Fail to load `HanLP`. Using `jieba` as default Chinese sentence segmentation backend.')
def load_data_from_excel(excel_file, config):
pass
def load_data_from_pickle(pickle_file):
with open(pickle_file, 'rb') as f:
data = pickle.load(f)
return data
def load_stopwords(sw_file, encoding='utf-8'):
with open(sw_file, 'r', encoding=encoding) as f:
stopwords = f.read().strip().split('\n')
return stopwords
def load_emotion_dict(emotion_dict_file):
return load_data_from_pickle(emotion_dict_file)
def segment(data):
def hanlp_cut(d):
cut = HanLP.segment(d)
t = [cut[i].word for i in range(len(cut))]
return t
if _backend == 'jieba':
return [jieba.lcut(d) for d in data]
else: # _backend = 'hanlp'
return [hanlp_cut(d) for d in data]
def train_test_split(X, y, ratio=0.8):
X = np.array(X, copy=False)
y = np.array(y, copy=False)
assert X.shape[0] == len(y)
N = X.shape[0]
N_train = int(N * ratio)
idx = np.arange(N)
np.random.shuffle(idx)
X_train = X[idx[:N_train]]
X_test = X[idx[N_train:]]
y_train = y[idx[:N_train]]
y_test = y[idx[N_train:]]
return (X_train, y_train), (X_test, y_test)
def compute_score(Y, predicted_Y, classes=[0, 1]):
recall = {}
precision = {}
F1 = {}
Y = np.array(Y, copy=False)
predicted_Y = np.array(predicted_Y, copy=False)
for key in classes:
N_key = np.sum(Y == key)
if N_key == 0:
recall[key] = 0.0
else:
recall[key] = np.sum((Y == key)*(predicted_Y == key))/(N_key+0.0)
N_predicted_pos = np.sum(predicted_Y == key)
if N_predicted_pos == 0:
precision[key] = 0.0
else:
precision[key] = np.sum((Y == key)*(predicted_Y == key))/(N_predicted_pos+0.0)
        if recall[key] + precision[key] == 0:
            F1[key] = 0.0
        else:
            F1[key] = 2*recall[key]*precision[key]/(recall[key]+precision[key])
return {'recall': recall, 'precision': precision, 'F1': F1}
def print_score(score, title=None):
if title:
print('='*53)
# print('|' + ' '*51 + '|')
print('|{:^51s}|'.format(title[:51]))
# print('|' + ' '*51 + '|')
print('='*53)
print('|{:^12s}|{:^12s}|{:^12s}|{:^12s}|'.format('class', 'recall', 'precision', 'F1'))
for label in score['recall']:
print('|{:^12s}|{:^12s}|{:^12s}|{:^12s}|'.format(
str(label), '%.2f'%score['recall'][label], '%.2f'%score['precision'][label], '%.2f'%score['F1'][label]))
print('='*53)
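# Tiny self-contained illustration (added): exercises compute_score/print_score on toy
# labels. It is only defined here and never called at import time.
def _toy_score_example():
    y_true = [0, 0, 1, 1, 1]
    y_pred = [0, 1, 1, 1, 1]
    score = compute_score(y_true, y_pred, classes=[0, 1])
    print_score(score, 'Toy example')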
def print_multi_scores(score_list, title=None):
N = len(score_list)
assert N > 1 # when N=1, use print_score instead, otherwise, alignment might be troublesome, for len('precision') > 6.
length = 12 + 5 + 6*N*3 + 2*3 # class_col + `|`*5 + score_col*N*3 + `/`*2*3
if title:
print('='*length)
print(('|{:^%ds}|'%(length-2)).format(title[:length]))
print('='*length)
format_ = '|{:^12s}|' + ('{:^6s}/'*(N-1) + '{:^6s}|') * 3
col_title_format = '|{:^12s}|' + '{:^%ds}|'%(N*6+2) * 3
print(col_title_format.format('class', 'recall', 'precision', 'F1'))
for label in score_list[0]['recall']:
data = [str(label)]
for obj in ['recall', 'precision', 'F1']:
for score in score_list:
data.append('%.2f'%score[obj][label])
print(format_.format(*data))
print('='*length)
class pca():
def __init__(self, n_components=100):
self.n_components = n_components
self._pca = PCA(n_components=self.n_components)
self._is_fitted = False
def fit(self, X):
self._pca.fit(X)
self._is_fitted = True
self.explained_variance_ratio_ = self._pca.explained_variance_ratio_
self.ratio_ = np.sum(self.explained_variance_ratio_[:self.n_components])
def transform(self, X):
if self._is_fitted:
return self._pca.transform(X)
else:
print('PCA has not yet been fitted. It would perform fitting on this data. ' + \
'If this is not what you want, check your code, and fit the model first.')
return self._pca.fit_transform(X)
def save(self, save_to):
joblib.dump(self._pca, save_to)
# @staticmethod
# def load(model_file):
# _pca = joblib.load(model_file)
# pca_ = pca(n_components=_pca.n_components_)
# pca_._pca = _pca
# pca_._is_fitted = True
# pca_.explained_variance_ratio_ = _pca.explained_variance_ratio_
# pca_.ratio_ = np.sum(pca_.explained_variance_ratio_[:pca_.n_components])
# return pca_
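# Hedged illustration (added): how the pca wrapper above is typically used. The shapes
# are arbitrary toy values, not anything the project prescribes.
def _pca_usage_sketch():
    X = np.random.randn(200, 300)
    reducer = pca(n_components=100)
    reducer.fit(X)
    X_reduced = reducer.transform(X)
    return X_reduced.shape  # expected: (200, 100)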
def fetch_data_from_oracle(connection, from_date, to_date, label_map=None):
print('Fetching data from remote oracle, this might take some time...')
query = '''select b.title,b.content_no_tag,b.orientation as relevance from cis_ans_basedata b inner join cis_ans_basedata_type t on (b.id=t.bid and t.delflag = 0 and (t.repeat=0 or t.repeat is null))
where B.Publish_Date > '%s' and B.Publish_Date < '%s' ''' % (from_date, to_date)
cursor = connection.cursor()
cursor.execute(query)
data = []
label = []
def convert(col):
if isinstance(col, cx_Oracle.LOB):
return col.read().decode('utf-8')
else:
return col
for i, record in enumerate(cursor):
if i % 1000 == 0:
print('.', end='', flush=True)
title = convert(record[0])
article = convert(record[1])
emotion = convert(record[2])
if article is None:
continue
else:
if emotion is None:
emotion = 1
if title is not None:
title = title.strip()
else:
title = ''
article = article.strip()
emotion = '负' if emotion == '2' else '非负'
if label_map:
emotion = label_map[emotion] # for example, convert '负', '非负' to 0, 1 respectively
data.append(title+'。'+article)
label.append(emotion)
connection.close()
print('.')
return data, label
def save_data(data, save_to_file):
with open(save_to_file, 'wb') as f:
pickle.dump(data, f) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/utils/utils.py | utils.py |
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
class Vocabulary(object):
def __init__(self, signature, min_word_len=2, name='voc'):
self.signature = signature
self.min_word_len = min_word_len
self.name = name
self.voc = dict()
self.freq = dict()
self.doc_freq = dict()
self.oov = None
self.size = 0
self._fixed_voc = False
def set_state(self, fixed=False):
assert fixed in [True, False, 0, 1]
self._fixed_voc = fixed
def get_state(self):
state = 'Fixed' if self._fixed_voc else 'Not fixed'
return state
def shuffle(self):
self.check_state()
idx = np.random.permutation(self.size)
shuffled_voc = dict()
shuffled_freq = dict()
shuffled_doc_freq = dict()
for key, id in self.voc.items():
shuffled_voc[key] = idx[id]
shuffled_freq[idx[id]] = self.freq[id]
shuffled_doc_freq[idx[id]] = self.doc_freq[id]
del self.voc, self.freq, self.doc_freq
self.voc, self.freq, self.doc_freq = shuffled_voc, shuffled_freq, shuffled_doc_freq
def _is_useless(self, x):
if len(x) < self.min_word_len:
return True
if x.strip('''#&$_%^*-+=<>`~!@(())??/\\[]{}—"';::;,。,.‘’“”|…\n abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890''') == '':
return True
return False
def update(self, words):
if self._fixed_voc:
raise Exception('Fixed vocabulary does not support update.')
for word in words:
if not self._is_useless(word):
id = self.voc.get(word, None)
if id is None: # new word
self.voc[word] = self.size
self.freq[self.size] = 1
self.doc_freq[self.size] = 0 # create doc_freq item
self.size += 1
else:
self.freq[id] += 1
for word in set(words):
if not self._is_useless(word):
id = self.voc.get(word, None)
if id is not None:
self.doc_freq[id] += 1 # update doc_freq
def get(self, word):
return self.voc.get(word, self.oov)
def __getitem__(self, word):
return self.voc.get(word, self.oov)
# def __setitem__(self, word, val):
# self.voc.__setitem__(word, val)
def __contains__(self, word):
return self.voc.__contains__(word)
def __iter__(self):
return iter(self.voc)
def __sizeof__(self):
return self.voc.__sizeof__() + self.freq.__sizeof__() + self.signature.__sizeof__() + self.size.__sizeof__() + \
self.name.__sizeof__() + self._fixed_voc.__sizeof__() + self.oov.__sizeof__() + self.doc_freq.__sizeof__()
def __delitem__(self, word): # delete would destory the inner representation
if self._fixed_voc:
raise Exception('Fixed vocabulary does not support deletion.')
else:
raise NotImplementedError
def get_size(self):
return self.size
def clear(self):
del self.voc, self.freq, self.doc_freq
self.voc = dict()
self.freq = dict()
self.doc_freq = dict()
self.size = 0
self._fixed_voc = False
def check_state(self):
return len(self.voc) == self.size and len(self.freq) == self.size and len(self.doc_freq) == self.size
def to_dict(self):
return self.voc
def set_signature(self, new_signature):
self.signature = new_signature
def remove(self, words_list):
size = 0
new_voc = {}
new_freq = {}
new_doc_freq = {}
for word in self.voc:
id = self.voc[word]
if word in words_list:
continue
else:
new_voc[word] = size
new_freq[size] = self.freq[id]
new_doc_freq[size] = self.doc_freq[id]
size += 1
self.size = size
self.voc = new_voc
self.freq = new_freq
self.doc_freq = new_doc_freq
def save(self, file_name=None):
save_to = (file_name if file_name else self.name)+'-%s.voc'%self.signature
with open(save_to, 'wb') as f:
pickle.dump([self.voc, self.freq, self.doc_freq, self.size, self.min_word_len, \
self.oov, self._fixed_voc, self.name, self.signature], f)
@classmethod
def load(cls, file_name):
with open(file_name, 'rb') as f:
[voc, freq, doc_freq, size, min_word_len, oov, _fixed, name, signature] = pickle.load(f)
        voc_from_file = cls(signature, name=name)  # pass name as a keyword: the second positional parameter is min_word_len
voc_from_file.voc = voc
voc_from_file.freq = freq
voc_from_file.doc_freq = doc_freq
voc_from_file.size = size
voc_from_file.min_word_len = min_word_len
voc_from_file.oov = oov
voc_from_file._fixed_voc = _fixed
voc_from_file.signature = signature
return voc_from_file
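# Small self-contained sketch (added): building a Vocabulary from two tokenised
# documents. The tokens are arbitrary examples, not project data.
def _vocabulary_usage_sketch():
    voc = Vocabulary(signature=0, name='demo')
    voc.update(['今天', '天气', '不错'])
    voc.update(['天气', '预报'])
    return voc.get_size(), voc['天气'], voc.doc_freq[voc['天气']]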
class TfidfTransf():
def __init__(self, signature, vocab=None, transformer_type='tfidf', transformer_norm='l2', vocab_name='vocab', min_word_len=2):
self._type = transformer_type.lower()
self._norm = transformer_norm.lower()
self.signature = signature
self.vocab_name = vocab_name
self.min_word_len = min_word_len
self.cv = None
self.transformer = None
if vocab:
if isinstance(vocab, Vocabulary):
self.vocab = vocab
else:
raise TypeError('Vocab needs input of type `Vocabulary`, but got %s.' % (type(vocab)))
else:
self.vocab = Vocabulary(signature, name=self.vocab_name, min_word_len=self.min_word_len)
def update_vocab(self, data):
for doc in data:
self.vocab.update(doc)
def set_state(self, fixed=False):
self.vocab.set_state(fixed=fixed)
def remove_from_vocab(self, words_or_vocab):
self.vocab.remove(words_or_vocab)
def fit(self, data):
if self.vocab.get_size() == 0:
print('Warning: Vocabulary is not yet built. It would built on this data.' + \
' If this is what you want, please update vocab first.')
self.update_vocab(data)
self.vocab.set_state(fixed=True)
self.cv = CountVectorizer(decode_error='replace', vocabulary=self.vocab.to_dict())
if self._type == 'tf':
self.transformer = TfidfTransformer(norm=self._norm, use_idf=False)
else:
self.transformer = TfidfTransformer(norm=self._norm, use_idf=True)
return self.transformer.fit(self.cv.transform(data))
def transform(self, data):
if self.transformer and self.cv:
return self.transformer.transform(self.cv.transform(data))
else:
print('Warning: The transformer has not yet been fitted. It would fit on this data.' + \
'If this is not you want, please fit it first.')
self.fit(data)
return self.transform(data)
def save(self, save_to):
joblib.dump([self.vocab, self.cv, self.transformer], save_to)
    @classmethod
    def load(cls, model_file):
        vocab, cv, transformer = joblib.load(model_file)
model = cls(signature=vocab.signature, vocab=vocab)
model.cv = cv
model.transformer = transformer
return model | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/sentiment_analysis/utils/tfidf_utils.py | tfidf_utils.py |
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
from ner.extract import Extract
import pandas as pd
import traceback
import json
import os
from pyhanlp import HanLP  # HanLP is referenced below (newSegment); import it explicitly here
DEBUG = False
PORT = 8018
HOST = '0.0.0.0'
# HOST = '127.0.0.1'
app = Flask(__name__)
file_path = os.path.dirname(os.path.realpath(__file__))
txt_path = r'./data/feature_dict.txt'
province_path = r'./data/province.txt'
country_path = 'data/国家名称.xlsx'
country_df = pd.read_excel(os.path.join(file_path,country_path), header=None)[0].tolist()
country_df.remove('中国')
extract = Extract(country=country_df)
province = extract.read_txt(os.path.join(file_path, province_path))
money_feature = extract.read_txt(os.path.join(file_path,txt_path))
address_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/address_filter.txt'))
capacity_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/capacity_filter.txt'))
entity_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/entity_filter.txt'))
money_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/money_filter.txt'))
project_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/project_filter.txt'))
state_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/state_filter.txt'))
time_filter = extract.read_txt(os.path.join(file_path,r'./data/filter/time_filter.txt'))
address_pattern = extract.read_txt(os.path.join(file_path,'./data/pattern/address_pattern.txt'))
capacity_pattern = extract.read_txt(os.path.join(file_path,'./data/pattern/capacity_pattern.txt'))
money_pattern = extract.read_txt(os.path.join(file_path,r'./data/pattern/money_pattern.txt'))
state_pattern = extract.read_txt(os.path.join(file_path,r'./data/pattern/state_pattern.txt'))
time_pattern = extract.read_txt(os.path.join(file_path,r'./data/pattern/time_pattern.txt'))
state_no_words = extract.read_txt(os.path.join(file_path,r'./data/filter/state_no_words.txt'))
model = HanLP.newSegment('crf').enableOrganizationRecognize(True)
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
def get_return_info(money_results=None, address_results=None, capacity_results=None,jia=None, yi=None, project_results=None, country_results=None, state_results=None, time_results=None):
return json.dumps({'项目金额': money_results, '项目地址': address_results, '设计产能': capacity_results, '执行机构':jia ,'企业':yi,'项目名称': project_results,
'涉及国家':country_results, '项目状态':state_results, '项目周期':time_results}, ensure_ascii=False)
@app.route('/extract/', methods=('GET', 'POST'))
def get_prediction():
    title = str(request.form.get('title', ''))
    text = str(request.form.get('text', ''))
print(title)
if len(title)==0:
print('文章标题为空')
return get_return_info('文章标题为空')
if len(text)==0:
print('文章内容为空')
return get_return_info('文章内容为空')
if (title is None) and (text is not None):
print('文章标题为空')
return get_return_info('文章标题为空')
if (title is not None) and (text is None):
print('文章内容为空')
return get_return_info('文章内容为空')
try:
global_sentences = extract.segment_para(para=text)
money_results = extract.money_pattern(global_sentences=global_sentences, filter_list=money_filter, money_pattern=money_pattern, money_feature=money_feature)
address_results = extract.address_pattern(global_sentences=global_sentences, filter_list=address_filter, address_pattern=address_pattern)
capacity_results = extract.capacity_pattern(global_sentences=global_sentences, filter_list=capacity_filter, capacity_pattern=capacity_pattern)
jia, yi = extract.org_patterns(global_sentences=global_sentences, filter_list=entity_filter, province=province, model=model)
# org_results.append(jia)
# org_results1.append(yi)
project_results = extract.pro(global_sentences=global_sentences, title=title, objects=project_filter, filter_list=project_filter)
country_results = extract.country_pattern(title=title, para=text)
state_results = extract.state(global_sentences=global_sentences, title=title, filter_list=state_filter, state_pattern=state_pattern, state_no_words= state_no_words)
time_results = extract.time1(global_sentences=global_sentences, filter_list=time_filter, time_pattern=time_pattern)
except:
return get_return_info(traceback.print_exc())
return get_return_info(money_results, address_results, capacity_results, jia, yi, project_results, country_results, state_results, time_results)
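# Illustrative client call (added); host/port follow the defaults defined above:
# import requests
# payload = {'title': '某项目签约', 'text': '……正文……'}
# resp = requests.post('http://127.0.0.1:8018/extract/', data=payload)
# print(resp.text)  # JSON string with 项目金额 / 项目地址 / 设计产能 / ... fields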
# if __name__ == '__main__':
app.run(debug=DEBUG, host=HOST, port=PORT) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/ner/app.py | app.py |
import pandas as pd
import re
import jieba.posseg as pseg
import emoji
import time as time_time
import os
import difflib
from pyhanlp import *
class Extract:
def __init__(self, country):
self.country = country
def read_txt(self, filenames):
# r'./feature_dict.txt'
lines = []
f = open(filenames,'r', encoding='utf-8')
for line in f.readlines():
lines.append(line.strip('\n'))
return lines
def segment_para(self, para):
split_pattern = re.compile(r'\n|。|?|!|\?|\!')
global_sentences = split_pattern.split(emoji.demojize(str(para)))
global_sentences = [str(i)+'。' for i in global_sentences]
return global_sentences
def filter_para(self, filter_list, sentence):
phrase = ''
for i in filter_list:
if sentence.count(i) != 0:
phrase = sentence
break
return phrase
def money_pattern(self, global_sentences, filter_list, money_pattern, money_feature):
# split_pattern = re.compile(r'\,\;')
sentences, money = [], []
sentences = [self.filter_para(filter_list=filter_list, sentence=index) for index in global_sentences]
sentences = [i for i in sentences if len(i) != 0]
for i in sentences:
index = 0
psg = ''
words = []
for term in HanLP.segment(i):
if term.word in money_feature:
psg += 'E'
else:
psg += str(term.nature)
words.append(term.word)
psg += str(index)
index += 1
for pattern_str in money_pattern:
pattern = re.compile(r'' + pattern_str)
rules = re.finditer(pattern, psg)
for j in rules:
num = re.sub(r'\D', ' ', (j.group()))
num = num.strip()
start = int(num.split(' ')[0]) + 1
end = int(num.split(' ')[-1]) + 1
money.append(''.join(words[start:end]))
money = [index for index in money if len(index) != 0]
money = list(set(money))
print(money)
return ','.join(money)
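    # How the pattern string works (added explanation): for each candidate sentence the
    # loop above builds a tag string such as "m0q1E2v3..." -- the POS tag (or 'E' for a
    # money keyword) followed by the token index -- so a regex over this string can
    # locate "number + unit + money keyword" runs, and the captured indices are then
    # mapped back to the original tokens via the `words` list.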
def write_excel(self, filename, df):
if os.path.exists(os.path.dirname(filename)) ==False:
os.mkdir(os.path.dirname(filename))
# df[column] = data
xlsx_content = pd.ExcelWriter(filename, engine='xlsxwriter')
df.to_excel(xlsx_content, sheet_name='Sheet1')
xlsx_content.close()
def project_pattern(self):
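        # Note (added): this helper still reads self.title / self.para, which the current
        # __init__ never sets; the Flask service uses pro() below instead, so treat this
        # method as legacy code.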
pattern = re.compile(r'([\。\!\!\:\:\ \丨]|)[a-zA-Z0-9\u4e00-\u9fa5]+((\u9879\u76ee)|(\u5de5\u7a0b)|(\u96a7\u9053))')
charter = ['。','!','!',':',':',',',' ','【','】',',']
# for i in range(len(data)):
# sent = data[i]
sent = self.title
try:
f1 = re.finditer(pattern, sent)
f1.__next__()
except StopIteration:
pattern = re.compile(r'([\。\!\!\:\:\ \丨]|)[a-zA-Z0-9\u4e00-\u9fa5]+((\u9879\u76ee)|(\u5de5\u7a0b))')
sent = self.para
f1 = re.finditer(pattern, sent[:800])
project = []
for index in f1:
start = index.span()[0]
end = index.span()[1]
before = sent[start:end].strip(' ')
after = re.sub('一带一路项目', '', before)
if len(after) != 0:
if after[0] in charter:
after = after.replace(after[0], '')
if len(after) > 2:
if (len(before) < 4) or ('个项目' in before):
pass
else:
project.append(after)
projects = list(set(project))
projects.sort(key=project.index)
return ','.join(projects[:2])
def address_pattern(self, global_sentences, filter_list, address_pattern):
# pattern = re.compile(r'')
# pattern1 = re.compile(r'')
address = []
sentences = [self.filter_para(filter_list=filter_list, sentence=index) for index in global_sentences]
sentences = [index for index in sentences if len(index) != 0]
for i in sentences:
for pattern_str in address_pattern:
pattern = re.compile(r'' + pattern_str)
f= re.finditer(pattern, i)
for index in f:
start = index.span()[0]
end = index.span()[1]
before = i[start:end - 1].strip()
after = re.sub('\d','',before)
if len(before)-len(after) < 6:
address.append(''.join(before))
address = list(set(address))
print(address)
# address.sort(key=addr.index)
return ','.join(address)
def capacity_pattern(self, global_sentences, filter_list, capacity_pattern):
# pattern = re.compile(r'')
# pattern1 = re.compile(r'')
# pa = [pattern, pattern1]
capacity = []
for index in global_sentences:
index = self.filter_para(filter_list=filter_list, sentence=index)
if len(index) != 0:
for pattern_str in capacity_pattern:
pattern = re.compile(r'' + pattern_str)
f1 = re.finditer(pattern, index)
for i in f1:
start = i.span()[0]
end = i.span()[1]
if bool(re.search('\d+', i.group())):
capacity.append(''.join(index[start + 1:end - 1]))
capacity = list(set(capacity))
print(capacity)
return ','.join(capacity)
def country_pattern(self, title, para):
country_dict = {}
country_result = ''
paragraph = title + para
for i in self.country:
num = paragraph[:5000].count(i)
country_dict[i] = num
cou = max(country_dict.values())
if cou != 0:
country_result = ','.join([k for k, v in country_dict.items() if v == cou])
else:
for i in self.country:
num = paragraph[5000:].count(i)
country_dict[i] = num
cou = max(country_dict.values())
if cou != 0:
country_result = ','.join([k for k, v in country_dict.items() if v == cou])
return country_result
def org_pattern(self, global_sentences):
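        # Note (added): org_pattern relies on self.filename_province, which is never set,
        # so calling it would raise AttributeError; the service uses org_patterns() below,
        # which takes the province list as an argument.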
# split_pattern = re.compile(r'\n|。|?|!|\?|\!')
# sentences = split_pattern.split(self.para)
province = self.read_txt(self.filename_province)
all_list = []
first_list, second_list = [], []
j = 0
def extactCompany(tree):
words, word1, word2= '', '', ''
list1, list2 = [], []
id = 0
for word in tree.iterator():
if word.HEAD.POSTAG == 'nt' and word.DEPREL == '定中关系' and ((word.HEAD.DEPREL == '定中关系') or (word.HEAD.DEPREL == '主谓关系')):
id = word.ID
word3 = word.LEMMA + word.HEAD.LEMMA
list1.append(word3)
if word.POSTAG == 'nt'and ((word.DEPREL == '定中关系') or (word.DEPREL == '主谓关系')):
if word.ID - id != 1:
words = word.LEMMA
word1 = word.HEAD.LEMMA
list1.append(words)
if word.HEAD.LEMMA == words and word.DEPREL == '并列关系' and word.POSTAG == 'nt':
list2.append(word.LEMMA)
else:
if word.HEAD.LEMMA == word1 and word.DEPREL == '状中结构':
word2 = word.LEMMA
if word.HEAD.LEMMA == word2 and word.DEPREL == '介宾关系' and word.POSTAG == 'nt':
list2.append(word.LEMMA)
return list1, list2
for index in global_sentences:
if ('签' in index) or ('中标' in index) or ('项目' in index):
index += '。'
model = HanLP.newSegment('crf').enableOrganizationRecognize(True)
org_list = model.seg(str(index))
for item in org_list:
if (str(item.nature) == 'nt'):
tree = HanLP.parseDependency(index)
list1, list2 = extactCompany(tree)
all_list.extend(list1)
all_list.extend(list2)
for i in all_list:
if (i[:2] in province) or (i[:3] in province):
second_list.append(i)
else:
first_list.append(i)
return ','.join(list(set(first_list))), ','.join(list(set(second_list)))
def org_patterns(self, global_sentences, filter_list, province, model):
sentences = [self.filter_para(filter_list=filter_list, sentence=index) for index in global_sentences]
sentence = ''
for index in sentences:
if len(index) != 0:
sentence += index
org, first_list, second_list = [], [], []
org_list = model.seg(str(sentence))
org_list = list(org_list)
for item in org_list:
if ((str(item.nature) == 'nt') or (str(item.nature) == 'ntc')) and ('银行' not in str(item.word)):
num = org_list.index(item)
word = item.word
if str(org_list[num-1].nature) == 'ns':
word = org_list[num-1].word + item.word
org.append(word)
org = list(set(org))
for i in org:
i = i.strip()
if (i[:1] in province) or (i[:2] in province) or (i[:3] in province):
second_list.append(i)
else:
first_list.append(i)
print(first_list)
print(second_list)
return ','.join(list(set(first_list))), ','.join(list(set(second_list)))
def pro(self, global_sentences, title, objects, filter_list):
# split_pattern = re.compile(r'\n|。|?|!|\?|\!|\s|;')
# sentences = split_pattern.split(self.para)
list_re = []
title_two = ''
def string_similar(str1, str2):
return difflib.SequenceMatcher(None, str1, str2).quick_ratio()
# sentences = hanlp.utils.rules.split_sentence(para)
def value(id, id1, filter, tree):
words = ''
if (id != 0) and (id1 != id):
for word in tree.iterator():
if word.ID >= id and word.ID <= id1:
if word.CPOSTAG in filter:
words = ''
break
else:
words += word.LEMMA
if len(words) != 0:
return words.split()[-1]
else:
return words
        # Rule 1: trigger verb (签署/中标/建设, ...) followed by an entity whose first word has one of the listed coarse POS tags, ending at a project keyword
def one_pattern(tree, objects):
verb = ['签署','签订', '完成', '支援', '援助', '中标', '建设']
word_cpostag = ['ns', 'vg', 'nz', 'nh', 'ni']
list1= []
id, id0, id1, id_end = 0, 0, 0, 0
for word in tree.iterator():
# print(tree)
if id == 0:
if word.LEMMA in verb:
id = word.ID + 1
id0 = word.ID + 2
if id != 0:
if word.ID == id:
if word.CPOSTAG in word_cpostag:
id_end = id
elif word.ID == id0:
if word.CPOSTAG in word_cpostag:
id_end = id0
if (id_end != 0) and (word.LEMMA in objects):
id1 = word.ID
break
words = value(id=id_end, id1=id1, filter = ['v', 'u'], tree=tree)
if len(words) != 0:
list1.append(words)
return list1
        # Rule 2: attributive-modifier (定中关系) word with a listed POS tag, ending at a project keyword
def two_pattern(tree, objects):
id, id1 = 0, 0
list2 = []
word_cpostag = ['ns', 'Vg', 'nz', 'nh', 'ni']
for word in tree.iterator():
if word.DEPREL == '定中关系' and word.CPOSTAG in word_cpostag :
id = word.ID
# print(id)
if (word.LEMMA in objects) and (id != 0):
id1 = word.ID
# print(id1)
break
words = value(id=id, id1=id1, filter = [ 'wp', 'u'], tree=tree)
if len(words) != 0:
list2.append(words)
return list2
        # Rule 3: span from the nearest entity-like word (ns/Vg/nh/ni) up to a project keyword whose dependency head is a verb
def three_pattern(tree, objects):
id, id1 = 0, 0
list3 = []
word_cpostag = ['ns', 'Vg', 'nh', 'ni']
for word in tree.iterator():
if word.CPOSTAG in word_cpostag :
id = word.ID
# print(id)
if (word.LEMMA in objects) and (id != 0) and (word.HEAD.CPOSTAG == 'v'):
id1 = word.ID
# print(id1)
break
words = value(id=id, id1=id1, filter = ['v', 'wp', 'u'], tree=tree)
if len(words) != 0:
list3.append(words)
return list3
tree = HanLP.parseDependency(title)
title_two = two_pattern(tree, objects)
list_re.extend(title_two)
sentences = [self.filter_para(filter_list=filter_list, sentence=i)for i in global_sentences]
sentences = [i for i in sentences if len(i) != 0 ]
# print(sentences)
for index in sentences:
# if len(index) != 0:
# index += '。'
# print(index)
tree = HanLP.parseDependency(index)
para_three = three_pattern(tree ,objects)
list_re.extend(para_three)
# if len(title_two) == 0:
para_one = one_pattern(tree, objects)
list_re.extend(para_one)
list_re = list(set(list_re))
for i in range(len(list_re)-1, 0, -1):
sum = string_similar(list_re[0], list_re[i])
if sum >= 0.8:
list_re.pop(i)
return ','.join(list_re)
def time1(self, global_sentences, filter_list, time_pattern):
# split_pattern = re.compile(r'\n|。|?|!|\?|\!')
num = []
sentences = [self.filter_para(filter_list=filter_list, sentence=para) for para in global_sentences]
for phrase in sentences:
if len(phrase) != 0:
run = False
for pattern_str in time_pattern:
pattern = re.compile(r'' + pattern_str)
num1 = re.finditer(pattern, phrase)
for index in num1:
start = re.finditer('\d+', index.group())
for i in start:
start = i.span()[0]
num.append(index.group()[start:])
run = True
break
if run:
break
time = list(set(num))
print(time)
return ','.join(time)
def state(self, global_sentences, title, filter_list, state_pattern, state_no_words):
state = []
states = False
year = time_time.asctime(time_time.localtime(time_time.time()))[-4:]
def isyears(string):
years = re.finditer('(\d){4}', string)
return [index.group() for index in years]
start1 = state_pattern.index('建设阶段')
start2 = state_pattern.index('运营阶段')
start3 = state_pattern.index('完成阶段')
sentences = [self.filter_para(filter_list=filter_list, sentence=i) for i in global_sentences]
if len(state) == 0:
for para in sentences:
if len(para) != 0:
for term in HanLP.segment(para):
if str(term.nature) == 'ns' or str(term.nature) == 'nt' or str(term.nature) == 'ntc' or str(term.nature) == 'nsf' or str(term.nature) == 'm':
states = True
if states:
for pattern_str in state_pattern[start1+1:start2]:
pattern = re.compile(r'' + pattern_str)
result = re.finditer(pattern, para)
for item in result:
word = item.group()
if len(word) != 0:
if len(self.filter_para(filter_list=state_no_words, sentence=word)) == 0:
years = isyears(word)
if len(years) == 0:
state.append('建设阶段')
# print(word)
# print(para)
states = False
else:
if years[-1] == year:
state.append('建设阶段')
states = False
break
if states:
for pattern_str2 in state_pattern[start2+1:start3]:
pattern_two = re.compile(pattern_str2)
two_result = re.finditer(pattern_two, para)
for item in two_result:
word = item.group()
if len(word) != 0:
if len(self.filter_para(filter_list=state_no_words, sentence=word)) == 0:
years = isyears(word)
if len(years) == 0:
state.append('运营阶段')
states = False
else:
if years[-1] > year:
state.append('建设阶段')
states = False
if years[-1] == year:
state.append('运营阶段')
states = False
break
if states:
for pattern_str3 in state_pattern[start3+1:]:
pattern_three = re.compile(pattern_str3)
three_result = re.finditer(pattern_three, para)
for item in three_result:
word = item.group()
if len(word) != 0:
if len(self.filter_para(filter_list=state_no_words, sentence=word)) == 0:
years = isyears(word)
if len(years) == 0:
state.append('完成阶段')
else:
if years[-1] <= year:
state.append('完成阶段')
break
state = list(set(state))
if len(state) == 0:
state.append('建设阶段')
print(state)
return ','.join(state)
# if __name__ == '__main__':
# txt_path = r'./data/feature_dict.txt'
# province_path = r'./province.txt'
# country_path = r'./data/国家名称.xls'
# excel_path = r'C:\Users\lenovo\Desktop\一带一路_0706.xls'
# country_df = pd.read_excel(country_path, header= None)[0].tolist()
# country_df.remove('中国')
# # nlp = StanfordCoreNLP(r'E:\迅雷下载\stanford-corenlp-latest\stanford-corenlp-4.1.0', lang='zh')
# df = pd.read_excel(excel_path)[:200]
# money_results = []
# project_results = []
# project_results1 = []
# address_results =[]
# capacity_results =[]
# country_results = []
# org_results = []
# org_results1 = []
# time_results = []
# state_results = []
# for i in range(len(df['内容'])):
# print('============================================================'+ str(i))
# extract = Extract(country=country_df)
# province = extract.read_txt(province_path)
# money_feature = extract.read_txt(txt_path)
# address_filter = extract.read_txt(r'./data/filter/address_filter.txt')
# capacity_filter = extract.read_txt(r'./data/filter/capacity_filter.txt')
# entity_filter = extract.read_txt(r'./data/filter/entity_filter.txt')
# money_filter = extract.read_txt(r'./data/filter/money_filter.txt')
# project_filter = extract.read_txt(r'./data/filter/project_filter.txt')
# state_filter = extract.read_txt(r'./data/filter/state_filter.txt')
# state_no_words = extract.read_txt(r'./data/filter/state_no_words.txt')
# time_filter = extract.read_txt(r'./data/filter/time_filter.txt')
# address_pattern = extract.read_txt('./data/pattern/address_pattern.txt')
# capacity_pattern = extract.read_txt('./data/pattern/capacity_pattern.txt')
# money_pattern = extract.read_txt(r'./data/pattern/money_pattern.txt')
# state_pattern = extract.read_txt(r'./data/pattern/state_pattern.txt')
# time_pattern = extract.read_txt(r'./data/pattern/time_pattern.txt')
# global_sentences = extract.segment_para(para=df['内容'][i])
# money_results.append(extract.money_pattern(global_sentences=global_sentences, filter_list = money_filter, money_pattern = money_pattern, money_feature=money_feature))
# # project_results.append(extract.project_pattern())
# address_results.append(extract.address_pattern(global_sentences=global_sentences, filter_list = address_filter, address_pattern = address_pattern))
# capacity_results.append(extract.capacity_pattern(global_sentences=global_sentences, filter_list = capacity_filter, capacity_pattern = capacity_pattern))
# # print(df['内容'][475])
# jia, yi = extract.org_patterns(global_sentences = global_sentences, filter_list = entity_filter, province=province)
# org_results.append(jia)
# org_results1.append(yi)
# project_results1.append(extract.pro(global_sentences=global_sentences, title=df['标题'][i], objects = project_filter, filter_list=project_filter))
# country_results.append(extract.country_pattern(title=df['标题'][i], para=df['内容'][i]))
# state_results.append(extract.state(global_sentences=global_sentences, title=df['标题'][i], filter_list = state_filter, state_pattern = state_pattern, state_no_words= state_no_words))
# time_results.append(extract.time1(global_sentences=global_sentences, filter_list = time_filter, time_pattern = time_pattern))
# df['合同金额'] = money_results
# # df['项目名称1'] = project_results
# df['项目名称'] = project_results1
# df['项目位置'] = address_results
# df['项目产能'] = capacity_results
# df['国家'] = country_results
# df['企业识别甲方'] = org_results
# df['企业识别乙方'] = org_results1
# df['项目周期'] = time_results
# df['项目状态'] = state_results
# # extract.write_excel('./result/合同信息抽取.xlsx', df)
# df.to_excel('./result/合同信息抽取.xlsx', columns=['标题', '内容', '原文链接', '合同金额', '项目名称', '项目位置', '项目产能','国家', '企业识别甲方', '企业识别乙方', '项目周期', '项目状态'])
# # jpype._jclass.ArrayIndexOutOfBoundsException: java.lang.ArrayIndexOutOfBoundsException: 5777
# # nlp.close() | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/ner/extract.py | extract.py |
import re
import pandas as pd
import json
import emoji
a = r'<h1>青岛双星控股股东双星集团</h1><p>响了青岛市属国有企业混改第一枪……10月9日,青岛双星<span style="font-size: 24px;">股价应声涨停,显示了市场对于这一举动的期待。</span></p><p><span style="font-size: 24px;">作为国资大省,山东省国有企业三年混改计划和青岛市国有企业改革正<span style="font-family: 隶书, SimLi; font-size: 24px;">步入深水区,双星集</span></span><span style="font-family: 隶书, SimLi;">团一级企业层面混改的启动,或掀起新一轮山东国企改革浪潮。值得注意的是,与此前的混改更多在二级、三级子公司层面相比,此次混改进一步深化,企业集团层面的混改成为国企改革攻坚重点合法权益得不到充分保护 ●由于国有企业和民营企业文化理念不同,双方混合后在管理方式、具体操作等方面存在矛盾,向现代企业制度转轨比较艰难 融合之路 ●省属企业新投资项目,原则上投资主体必须是现有混合所有制企业或新引进非国有资本合作企业 ●研究建立以资本收益为主的考核指标体系,支持混改企业按市场化原则进合法权益得不到充分保护 ●由于国有企业和民营企业文化理念不同,双方混合后在管理方式、具体操作等方面存在矛盾,向现代企业制度转轨比较艰难 融合之路 ●省属企业新投资项目,原则上投资主体必须是现有混合所有制企业或新引进非国有资本合作企业 ●研究建立以资本收益为主的考核指标体系,支持混改企业按市场化原则进</span>。打响了青岛市属国有企业混改第一枪。10月9日,青岛双星<span style="font-size: 24px;">股价应声涨停,显示了市场对于这一举动的期待。</p><h1>双星集团的混改实验</h1><p>省属企业新投资项目,原则上投资主体必须是现有混合所有制企业或新引进非国有资本合作企业</p>'
def filter_emoji(context):
#过滤表情
chars = ''
text = emoji.demojize(context)
for i in range(9636, 11217):
chars += chr(i)
chars = '[' + chars
chars = chars + ']'
rules = re.compile(chars)
text = rules.sub('。', text)
return text
def clean_tag(context):
rule = re.compile('</h[0-9]+>', re.S)
context = rule.sub('\n', context)
rule1 = re.compile('</p>', re.S)
context = rule1.sub('\n', context)
rules = re.compile('<[^>]+>', re.S)
text = rules.sub('', context)
text = filter_emoji(text)
text = text.split('\n')
    data = []
    for idx, seg in enumerate(text):
        data.append((seg, idx))
    return data
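# Illustrative sketch (not part of the original pipeline): `clean_tag` turns closing
# </h*> and </p> tags into newlines, strips the remaining tags and emoji, and returns
# one (paragraph_text, paragraph_index) tuple per resulting line, e.g.
#
#   >>> clean_tag('<h1>标题</h1><p>第一段。</p><p>第二段!</p>')
#   [('标题', 0), ('第一段。', 1), ('第二段!', 2), ('', 3)]
#
# (the trailing empty segment comes from the final newline inserted for the last </p>).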
def split_sentence(tup):
index1 = tup[1]
context = tup[0]
    context = re.sub('([。!?\?])([^”’])', r"\1\n\2", context)  # single-character sentence terminators
    context = re.sub('(\.{6})([^”’])', r"\1\n\2", context)  # English ellipsis (six dots)
    context = re.sub('(\…{2})([^”’])', r"\1\n\2", context)  # Chinese ellipsis
context = re.sub('([。!?\?][”’])([^,。!?\?])', r'\1\n\2', context)
    # a closing quote is the true sentence end only when a terminator precedes it, so the
    # newline is placed after the quote; the substitutions above deliberately keep the quotes
    para = context.rstrip()  # drop any extra trailing \n at the end of the paragraph
    # many rule sets also split on semicolons; they (and dashes, English double quotes, etc.)
    # are ignored here and can be added back with minor tweaks if needed
sentences = para.split('\n')
data = []
for i in sentences:
data.append((i,index1))
return data
def text_process(context):
data = clean_tag(context)
text = []
for index in data:
text.extend(split_sentence(index))
# context = map(lambda x: split_sentence(x), df['text'])
return text
print(text_process(a)) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/de_duplication/process.py | process.py |
import jieba
import jieba.posseg as pseg
from relativeness_analysis.vocabulary import Vocabulary
from relativeness_analysis.classifier2 import xgboost
import xlrd, xlwt
import os, sys
import argparse
import numpy as np
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
# import sys
# reload(sys)
# sys.setdefaultencoding('utf-8')
####################################### Parameters ###################################################
# 1. model path
file_path = os.getcwd()
model_path = os.path.join(file_path, 'classifier') # directory holding the trained classifiers; avoid placing unrelated files here, or they may be picked up by mistake
vocab_path = os.path.join(file_path, 'vocab') # directory holding the vocabularies matching the classifiers; avoid placing unrelated files here, or they may be picked up by mistake
####################################### Code #########################################################
def find_vocab(vocab_folder):
files = os.listdir(vocab_folder)
candidate_vocab = {}
for file in files:
tmp = '.'.join(file.split('.')[:-1]).split('-')
if len(tmp) == 3: # vocab-all-1491195238.voc
v, company, signature = tmp
if v == 'vocab':
if candidate_vocab.get(company, None) is None:
candidate_vocab[company] = dict()
candidate_vocab[company][signature] = file
return candidate_vocab
def find_clf(clf_folder):
files = os.listdir(clf_folder)
candidate_clf = {}
for file in files:
tmp = '.'.join(file.split('.')[:-1]).split('-') # # xgboost-all-tf-l1-l2-0.4-1491195238.clf
if len(tmp) == 7: # xgboost-all-tf-l1-l2-0.4-1491195238
c, company, transformer, penalty, norm, thres, signature = tmp
if c == 'xgboost':
if candidate_clf.get(company, None) is None:
candidate_clf[company] = dict()
candidate_clf[company][signature] = (file, transformer, penalty, norm, thres)
return candidate_clf
def match_and_load(candidate_model_file, candidate_vocab_file, model_folder, vocab_folder):
model = dict()
for company in candidate_model_file: # Based on model's key instead of vocab's
tmp_model = candidate_model_file[company]
tmp_vocab = candidate_vocab_file.get(company, None)
if tmp_vocab is not None:
for signature in set(tmp_model.keys()).intersection(tmp_vocab.keys()):
tmp = model.get(company, None)
if tmp is not None:
if int(signature) > int(model[company][0]): # a model created more recently
model[company] = (signature, tmp_model[signature][0], tmp_vocab[signature]) + tmp_model[signature][1:]
else:
model[company] = (signature, tmp_model[signature][0], tmp_vocab[signature]) + tmp_model[signature][1:]
loaded_model = dict()
for company in model:
signature, model_file_name, vocab_file_name, transformer, penalty, norm, thres = model[company]
clf = xgboost.load(os.path.join(model_folder, model_file_name))
clf.thres = float(thres)
vocab = Vocabulary.load(os.path.join(vocab_folder, vocab_file_name))
loaded_model[company] = (signature, clf, vocab, transformer, penalty, norm, thres)
return loaded_model
# # countvectorizer and tfidftransformer
# def create_transformer(model):
# transformer = dict()
# for company in model:
# cv = CountVectorizer(decode_error='replace', vocabulary=model[company][2].to_dict())
# use_idf = True if model[company][3].lower() == 'tfidf' else False
# tfidf = TfidfTransformer(norm=model[company][-2], use_idf=use_idf)
# transformer[company] = lambda data: tfidf.transform(cv.transform(data))
# return transformer
# locate the trained classifier and vocabulary files
candidate_model_file = find_clf(model_path)
candidate_vocab_file = find_vocab(vocab_path)
if len(candidate_vocab_file) == 0 or len(candidate_model_file) == 0:
raise Exception(u'没有找到训练好的模型和词典文件!')
print(candidate_model_file, candidate_vocab_file)
model = match_and_load(candidate_model_file, candidate_vocab_file, model_path, vocab_path)
# transformer = create_transformer(model)
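# For reference, each entry of `model` built above is a 7-tuple (see match_and_load):
#
#   model[company] == (signature, clf, vocab, transformer, penalty, norm, thres)
#                       [0]       [1]  [2]    [3]          [4]      [5]   [6]/[-1]
#
# which is why the code below reads model[company][1] for the classifier, [2] for the
# vocabulary, [-2] for the tf-idf norm and [-1] for the decision threshold.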
def read_file_for_eval(path, idx_dict):
    # compatibility shim for xlrd under Python 2; `unicode` does not exist in Python 3,
    # so this override only has an effect when running under Python 2
    xlrd.book.unpack_SST_table.__globals__["unicode"] = lambda s, e: unicode(s, e, errors="replace")
book = xlrd.open_workbook(path, encoding_override="utf-8")
sheet = book.sheet_by_index(0)
content_begin_with = idx_dict['content_begin_with']
article_col = idx_dict['article_col']
title_col = idx_dict['title_col']
topic_col = idx_dict['topic_col']
articles = sheet.col_values(article_col, start_rowx=content_begin_with)
titles = sheet.col_values(title_col, start_rowx=content_begin_with)
topics = sheet.col_values(topic_col, start_rowx=content_begin_with)
data = {}
for i, article in enumerate(articles):
if sys.version_info.major == 2:
topic = topics[i].encode('utf-8').strip()
data[i] = [titles[i].encode('utf-8').strip() + '。' + article.encode('utf-8').strip(), topic]
else:
topic = topics[i].strip()
data[i] = [titles[i].strip() + '。' + article.strip(), topic]
return data
def test(text, company):
# global count_vect, tf_transformer
if company not in model:
return '不支持的企业'
if text == '。':
return '删除'
# if choose_tag[company]:
# processed_text = ' '.join([w for w, flag in pseg.cut(text) if flag in \
# ['n', 'ns', 'nt', 'nz', 'nl', 'ng', 'v', 'vd', 'vn', 'vf', 'vx', \
# 'vi', 'vl', 'vg', 'a', 'ad', 'an', 'ag', 'al', 'd']])
# else:
processed_text = ' '.join([w for w in jieba.lcut(text)])
cv = CountVectorizer(decode_error='replace', vocabulary=model[company][2].to_dict())
tfidf_trans = TfidfTransformer(norm=model[company][-2], use_idf=False)
counts = cv.transform([processed_text])
tfidf = tfidf_trans.transform(counts)
if tfidf.size == 0:
return '删除'
thres = float(model[company][-1])
clf = model[company][1]
predicted_label = clf.predict(tfidf, return_real_label=True)[0]
return predicted_label
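# Hypothetical usage sketch (the available company keys depend on which model/vocab
# files were found above; 'all' is the model trained without a per-company split):
#
#   >>> test('某公司发布年度报告。公司营收稳步增长', 'all')
#   '保留'   # or '删除' / '不支持的企业', depending on the loaded model and threshold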
def main(file_path, _all=False, prefix='./'):
result_file = '.'.join(os.path.basename(file_path).split('.')[:-1]) + '.xls'
result_file = os.path.join(prefix, result_file)
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('result')
sheet.write(0, 0, "标题+内容")
sheet.write(0, 1, "企业")
sheet.write(0, 2, "相关性")
if _all:
sheet.write(0, 3, "备注:不区分企业")
else:
sheet.write(0, 3, "备注:区分企业")
idx_dict = {}
    idx_dict['content_begin_with'] = 1 # row where samples start; row 0 is the header, samples begin at row 1
    idx_dict['article_col'] = 1 # column of the article content in the excel file (0-based)
    idx_dict['title_col'] = 0 # column of the title (0-based)
    idx_dict['topic_col'] = 4 # column of the company (0-based)
data = read_file_for_eval(file_path, idx_dict)
for i in data:
text = data[i][0]
company = data[i][1]
if _all:
relevant = test(text, 'all')
else:
relevant = test(text, company)
sheet.write(i+1, 0, text)
sheet.write(i+1, 1, company)
sheet.write(i+1, 2, relevant)
workbook.save(result_file)
return result_file
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='判断语料与企业的相关性')
parser.add_argument('-file', type=str, required=True,
help='待判断excel文件路径')
parser.add_argument('-all', type=int, default=1,
help='是否区分企业,默认为不区分企业')
parser.add_argument('-prefix', type=str, default='./',
help='判断结果输出到哪个目录下,默认为当前目录')
args = parser.parse_args()
print(args.file, args.all, args.prefix)
main(args.file, _all=args.all, prefix=args.prefix) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/relevant_analysis.py | relevant_analysis.py |
import pickle
import numpy as np
class Vocabulary(object):
def __init__(self, signature, min_word_len=2, name='voc'):
self.signature = signature
self.min_word_len = min_word_len
self.name = name
self.voc = dict()
self.freq = dict()
self.doc_freq = dict()
self.oov = None
self.size = 0
self._fixed_voc = False
def set_state(self, fixed=False):
assert fixed in [True, False, 0, 1]
self._fixed_voc = fixed
def get_state(self):
state = 'Fixed' if self._fixed_voc else 'Not fixed'
return state
def shuffle(self):
self.check_state()
idx = np.random.permutation(self.size)
shuffled_voc = dict()
shuffled_freq = dict()
shuffled_doc_freq = dict()
for key, id in self.voc.items():
shuffled_voc[key] = idx[id]
shuffled_freq[idx[id]] = self.freq[id]
shuffled_doc_freq[idx[id]] = self.doc_freq[id]
del self.voc, self.freq, self.doc_freq
self.voc, self.freq, self.doc_freq = shuffled_voc, shuffled_freq, shuffled_doc_freq
def _is_useless(self, x):
if len(x) < self.min_word_len:
return True
if x.strip('''#&$_%^*-+=<>`~!@(())??/\\[]{}—"';::;,。,.‘’“”|…\n abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890''') == '':
return True
return False
def update(self, words):
if self._fixed_voc:
raise Exception('Fixed vocabulary does not support update.')
for word in words:
if not self._is_useless(word):
id = self.voc.get(word, None)
if id is None: # new word
self.voc[word] = self.size
self.freq[self.size] = 1
self.doc_freq[self.size] = 0 # create doc_freq item
self.size += 1
else:
self.freq[id] += 1
for word in set(words):
if not self._is_useless(word):
id = self.voc.get(word, None)
if id is not None:
self.doc_freq[id] += 1 # update doc_freq
def get(self, word):
return self.voc.get(word, self.oov)
def __getitem__(self, word):
return self.voc.get(word, self.oov)
# def __setitem__(self, word, val):
# self.voc.__setitem__(word, val)
def __contains__(self, word):
return self.voc.__contains__(word)
def __iter__(self):
return iter(self.voc)
def __sizeof__(self):
return self.voc.__sizeof__() + self.freq.__sizeof__() + self.signature.__sizeof__() + self.size.__sizeof__() + \
self.name.__sizeof__() + self._fixed_voc.__sizeof__() + self.oov.__sizeof__() + self.doc_freq.__sizeof__()
def __delitem__(self, word): # delete would destory the inner representation
if self._fixed_voc:
raise Exception('Fixed vocabulary does not support deletion.')
else:
raise NotImplementedError
def get_size(self):
return self.size
def clear(self):
del self.voc, self.freq, self.doc_freq
self.voc = dict()
self.freq = dict()
self.doc_freq = dict()
self.size = 0
self._fixed_voc = False
def check_state(self):
return len(self.voc) == self.size and len(self.freq) == self.size and len(self.doc_freq) == self.size
def to_dict(self):
return self.voc
def set_signature(self, new_signature):
self.signature = new_signature
def save(self, file_name=None):
save_to = (file_name if file_name else self.name)+'-%s.voc'%self.signature
with open(save_to, 'wb') as f:
pickle.dump([self.voc, self.freq, self.doc_freq, self.size, self.min_word_len, \
self.oov, self._fixed_voc, self.name, self.signature], f)
@classmethod
def load(cls, file_name):
with open(file_name, 'rb') as f:
[voc, freq, doc_freq, size, min_word_len, oov, _fixed, name, signature] = pickle.load(f)
voc_from_file = cls(signature, name)
voc_from_file.voc = voc
voc_from_file.freq = freq
voc_from_file.doc_freq = doc_freq
voc_from_file.size = size
voc_from_file.min_word_len = min_word_len
voc_from_file.oov = oov
voc_from_file._fixed_voc = _fixed
voc_from_file.signature = signature
return voc_from_file
def test():
x = ['哈哈', '测试', '嘿', '嗨', '早上好', '哈哈', '嘿', '下午好', '测试', '你好', 'test', 'c', 'm']
voc = Vocabulary(signature=123, name='test', min_word_len=1)
voc.update(x)
print(voc.__class__.__name__)
print(voc.get('哈哈'), voc.get('测试'))
print(voc['早上好'], voc['c'])
print(voc.__sizeof__())
print('Voc size: %s' % voc.size)
print('`c` in voc: %s, `哈哈` in voc: %s' % ('c' in voc, '哈哈' in voc))
try:
del voc['a']
del voc['哈哈']
except Exception as e:
print(e)
voc.clear()
voc.update(x)
voc.update(x)
print(voc.voc)
print(voc.freq)
voc.save('voc_test.voc')
voc = Vocabulary.load('voc_test.voc')
print('Voc size: %s' % voc.size)
print(voc.voc)
print(voc.freq)
print(voc.doc_freq)
voc.shuffle()
print(voc.voc)
print(voc.freq)
print(voc.doc_freq)
if __name__ == '__main__':
test() | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/vocabulary.py | vocabulary.py |
# -*- coding: utf-8 -*-
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
import os
from relativeness_analysis.relevant_analysis import main, test
from relativeness_analysis.manager import test as train_test
import warnings
warnings.filterwarnings('ignore')
DEBUG = False
PORT = 8006
HOST = '0.0.0.0'
app = Flask(__name__)
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.secret_key = 'skfasmknfdhflm-vkllsbzdfmkqo3ooishdhzo295949mfw,fk'
# APP_ROOT = os.path.abspath('.')
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
@app.route('/api/', methods=('GET', 'POST'))
def get_result():
# title = request.args.get('title', '')
# content = request.args.get('content', '')
# company = request.args.get('company', '')
# if title == '' and content == '':
# return '-2'
# _content = title + '。' + content
# # print(_content)
# relevant = test(_content, company)
# return relevant
file_path = request.form.get('file_path', None)
_all = request.form.get('_all', True)
prefix = request.form.get('prefix', './')
if file_path is None:
return '必须给定输入文件!'
if type(_all) == str:
_all = _all.lower()
if _all == 'false':
_all = False
elif _all == 'true':
_all = True
else:
return '_all参数错误,只能取值True或者False。'
print(file_path, _all, prefix)
result_file = main(file_path, _all=_all, prefix=prefix)
return result_file
@app.route('/api2/', methods=('GET', 'POST'))
def get_single_result():
title = request.form['title']
print(title)
content = request.form['content']
company = request.form['company']
if title == '' and content == '':
return '-2'
_content = title + '。' + content
# print(_content)
relevant = test(_content, company)
return relevant
@app.route('/train/', methods=('GET', 'POST'))
def train():
connection_string = request.form['connection_string']
begin_date = request.form['begin_date']
end_date = request.form['end_date']
try:
if (connection_string is None) and (begin_date is None) and (end_date is None):
print(r'正在使用默认参数训练模型,connection_string为cis/cis_zzsn9988@114.116.91.1:1521/orcl, begin_date为2017-03-01, end_date为2017-07-13')
train_test()
elif (connection_string == '') and (begin_date == '') and (end_date == ''):
print(r'正在使用默认参数训练模型,connection_string为cis/cis_zzsn9988@114.116.91.1:1521/orcl, begin_date为2017-03-01, end_date为2017-07-13')
train_test()
else:
print(r'正在使用指定参数训练模型,connection_string为%s, begin_date为%s, end_date为%s' % (connection_string, begin_date, end_date))
train_test(connection_string, begin_date, end_date)
except Exception as e:
return 'train fail'
else:
return 'train success'
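# Hedged usage sketch for the HTTP endpoints above (host/port follow the HOST/PORT
# constants in this file; the file paths are placeholders, not real data):
#
#   import requests
#   # batch relevance judgement on an excel file
#   requests.post('http://127.0.0.1:8006/api/',
#                 data={'file_path': '/path/to/input.xls', '_all': 'True', 'prefix': './'})
#   # single-document judgement
#   requests.post('http://127.0.0.1:8006/api2/',
#                 data={'title': '标题', 'content': '正文', 'company': 'all'})
#   # retrain with the default Oracle connection and date range
#   requests.post('http://127.0.0.1:8006/train/',
#                 data={'connection_string': '', 'begin_date': '', 'end_date': ''})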
app.run(debug=DEBUG, host=HOST, port=PORT) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/app.py | app.py |
import numpy as np
import jieba
import xlrd
import sys, time
import pickle
from relativeness_analysis.vocabulary import Vocabulary
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_selection import SelectFpr, mutual_info_classif, SelectPercentile
import scipy.linalg
import scipy.sparse
from sklearn.base import BaseEstimator, TransformerMixin
class data_processor(object):
def __init__(self, data, transformer='tf', transformer_norm='l2'):
self.data = data
transformer = transformer.lower()
assert transformer in ['tf', 'tfidf']
self.transformer_type = transformer
self.transformer_norm = transformer_norm
self.transformer = None
# if not self.for_test:
# if vocab is not None:
# if type(vocab) == Vocabulary:
# self.vocab = vocab
# self.vocab.set_state(fixed=False)
# else:
# raise Exception('`vocab` should be of type `Vocabulary`.')
# else:
# self.vocab = Vocabulary(signature=int(time.time()), name='vocab')
def reset(self):
self.transformer = None
self.cv = None
def preprocess(self, _all=False, _emotion=False):
processed_data = {}
processed_label = {}
processed_label_dict = {}
label_set = ['保留', '删除']
label_dict = {0: '保留', 1: '删除'}
reverse_label_dict = {'保留': 0, '删除': 1}
# only_have_one_label_key = []
if _all:
if not _emotion: # _all=True, _emotion=False
processed_data['all'] = []
processed_label['all'] = []
processed_label_dict['all'] = label_dict
for key in self.data:
# processed_data['all'] += [' '.join(jieba.lcut(record[0])) for record in data[key]]
if len(processed_data.get('all')) == 0:
processed_data['all'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key]])
else:
processed_data['all'] = np.hstack((processed_data['all'], [' '.join(jieba.lcut(record[0])) for record in self.data[key]]))
label = [record[1] for record in self.data[key]]
if len(processed_label.get('all')) == 0:
processed_label['all'] = np.array([reverse_label_dict[l] for l in label])
else:
processed_label['all'] = np.hstack((processed_label['all'], [reverse_label_dict[l] for l in label]))
# processed_label['all'] += [reverse_label_dict[l] for l in label]
else: # _all=True, _emotion=True
processed_data['all-非负'] = []
processed_data['all-负'] = []
processed_label['all-非负'] = []
processed_label['all-负'] = []
processed_label_dict['all-非负'] = processed_label_dict['all-负'] = label_dict
for key in self.data:
if len(processed_data.get('all-非负')) == 0:
processed_data['all-非负'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='非负'])
processed_label['all-非负'] = np.array([reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='非负'])
else:
processed_data['all-非负'] = np.hstack((processed_data['all-非负'], \
[' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='非负']))
processed_label['all-非负'] = np.hstack((processed_label['all-非负'], \
[reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='非负']))
if len(processed_data.get('all-负')) == 0:
processed_data['all-负'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='负'])
processed_label['all-负'] = np.array([reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='负'])
else:
processed_data['all-负'] = np.hstack((processed_data['all-负'], \
[' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='负']))
processed_label['all-负'] = np.hstack((processed_label['all-负'], \
[reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='负']))
else:
for key in self.data:
if not _emotion: # _all=False, _emotion=False
processed_data[key] = [' '.join(jieba.lcut(record[0])) for record in self.data[key]]
label = [record[1] for record in self.data[key]]
# if len(set(label_set) - set(label)) != 0:
# print('%s: Only have one label(%s)' % (key, label[0]))
# only_have_one_label_key.append(key)
# assert len(set(label_set) - set(label)) == 0, 'It should have exactly two classes.'
# label_dict = {}
# reverse_label_dict = {}
# for i, k in enumerate(label_set):
# label_dict[i] = k
# reverse_label_dict[k] = i
# processed_label[key] = [reverse_label_dict[l] for l in label]
processed_label[key] = np.array([reverse_label_dict[l] for l in label])
processed_label_dict[key] = label_dict
processed_data[key] = np.array(processed_data[key])
else: # _all=False, _emotion=True
processed_data[key+'-非负'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='非负'])
processed_data[key+'-负'] = np.array([' '.join(jieba.lcut(record[0])) for record in self.data[key] if record[2]=='负'])
processed_label[key+'-非负'] = np.array([reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='非负'])
processed_label[key+'-负'] = np.array([reverse_label_dict[record[1]] for record in self.data[key] if record[2]=='负'])
processed_label_dict[key+'-非负'] = label_dict
processed_label_dict[key+'-负'] = label_dict
# processed_data[key] = processed_data[key]
return processed_data, processed_label, processed_label_dict
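    # Sketch of the returned structures (with _all=True, _emotion=False):
    #
    #   processed_data       == {'all': np.array(['word1 word2 ...', ...])}  # space-joined jieba tokens
    #   processed_label      == {'all': np.array([0, 1, ...])}               # 0 -> '保留', 1 -> '删除'
    #   processed_label_dict == {'all': {0: '保留', 1: '删除'}}
    #
    # With _all=False the keys are the individual company ids; with _emotion=True each
    # key is further split into '<key>-非负' and '<key>-负'.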
def update_vocab(self, vocab, processed_data):
if type(processed_data) == dict:
for key in processed_data:
for record in processed_data[key]:
vocab.update(record.split(' '))
else:
for record in processed_data:
vocab.update(record.split(' '))
assert vocab.check_state(), 'Something wrong with vocabulary.'
def transform(self, vocab, data, label, with_feature_selection=False, feature_selection_method='FDA', binary=False):
vocab.set_state(fixed=True)
assert feature_selection_method in ['FDA', 'SelectPercentile']
if not self.transformer:
self.cv = CountVectorizer(decode_error='replace', vocabulary=vocab.to_dict(), binary=binary)
if self.transformer_type == 'tf':
self.transformer = TfidfTransformer(norm=self.transformer_norm, use_idf=False)
else:
self.transformer = TfidfTransformer(norm=self.transformer_norm, use_idf=True)
if type(data) == dict:
transformed_data = {}
for key in data:
if with_feature_selection:
if feature_selection_method == 'FDA':
transformed_data[key] = FDA().fit_transform(
self.transformer.transform(self.cv.transform(data[key])), label[key]
)
else:
transformed_data[key] = SelectPercentile(mutual_info_classif, 20).fit_transform(
self.transformer.transform(self.cv.transform(data[key])), label[key]
)
else:
transformed_data[key] = self.transformer.transform(self.cv.transform(data[key]))
else:
if with_feature_selection:
if feature_selection_method == 'FDA':
transformed_data = FDA().fit_transform(
self.transformer.transform(self.cv.transform(data)), label
)
else:
transformed_data = SelectPercentile(mutual_info_classif, 20).fit_transform(
self.transformer.transform(self.cv.transform(data)), label
)
else:
transformed_data = self.transformer.transform(self.cv.transform(data))
return transformed_data
class FDA(BaseEstimator, TransformerMixin):
def __init__(self, alpha=1e-4):
'''Fisher discriminant analysis
Arguments:
----------
alpha : float
Regularization parameter
'''
self.alpha = alpha
def fit(self, X, Y):
'''Fit the LDA model
Parameters
----------
X : array-like, shape [n_samples, n_features]
Training data
Y : array-like, shape [n_samples]
Training labels
Returns
-------
self : object
'''
n, d_orig = X.shape
classes = np.unique(Y)
assert(len(Y) == n)
if isinstance(X, scipy.sparse.csr.csr_matrix):
mean_global = X.mean(axis=0)
else:
mean_global = np.mean(X, axis=0, keepdims=True)
scatter_within = self.alpha * np.eye(d_orig)
scatter_between = np.zeros_like(scatter_within)
for c in classes:
n_c = np.sum(Y==c)
if n_c < 2:
continue
if isinstance(X, scipy.sparse.csr.csr_matrix):
mu_diff = X[Y==c].mean(axis=0) - mean_global
else:
mu_diff = np.mean(X[Y==c], axis=0, keepdims=True) - mean_global
scatter_between = scatter_between + n_c * np.dot(mu_diff.T, mu_diff)
if isinstance(X, scipy.sparse.csr.csr_matrix):
scatter_within = scatter_within + n_c * np.cov(X[Y==c].todense(), rowvar=0)
else:
scatter_within = scatter_within + n_c * np.cov(X[Y==c], rowvar=0)
e_vals, e_vecs = scipy.linalg.eig(scatter_between, scatter_within)
self.e_vals_ = e_vals
self.e_vecs_ = e_vecs
self.components_ = e_vecs.T
return self
def transform(self, X):
'''Transform data by FDA
Parameters
----------
X : array-like, shape [n_samples, n_features]
Data to be transformed
Returns
-------
X_new : array, shape (n_samples, n_atoms)
'''
return X.dot(self.components_.T)
def fit_transform(self, X, Y):
self.fit(X, Y)
return self.transform(X)
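# A minimal, self-contained sketch of using FDA on dense toy data (hypothetical numbers,
# independent of the text pipeline above):
#
#   import numpy as np
#   X = np.random.randn(20, 5)
#   Y = np.array([0] * 10 + [1] * 10)
#   X_fda = FDA(alpha=1e-4).fit_transform(X, Y)   # shape (20, 5)
#
# Note that scipy.linalg.eig on the generalized eigenproblem may return complex-valued
# eigenvectors, so downstream code may need to take the real part of the projection.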
def test():
# file_path = 'D:\\学习\\研究生\\文本挖掘项目\\舆情正负面判别\\舆情标引信息-20170104.xlsx'
file_path = 'test.xlsx'
idx_dict = {}
idx_dict['content_begin_with'] = 1
    idx_dict['article_col'] = 1 # column of the article content in the excel file (0-based)
    idx_dict['title_col'] = 0 # column of the title (0-based)
    idx_dict['relativeness_col'] = 5 # column of the relevance label (0-based)
    idx_dict['topic_col'] = 4 # column of the company (0-based)
vocab = Vocabulary(signature=123, name='test', min_word_len=2)
dp = data_processor(file_path, config=idx_dict, for_test=False)
processed_data, processed_label, processed_label_dict = dp.preprocess(_all=True)
dp.update_vocab(vocab, processed_data)
print(vocab.get_size())
for i, word in enumerate(vocab):
if i < 20:
id = vocab[word]
print('[%s] id: %s, freq: %s, doc_freq: %s' % (word, id, vocab.freq[id], vocab.doc_freq[id]))
else:
break
vocab.save('vocab')
transformed_data = dp.transform(vocab, processed_data['all'])
print(transformed_data.shape)
if __name__ == '__main__':
test() | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/utils.py | utils.py |
from __future__ import print_function
import xlrd
import numpy as np
import scipy.sparse.csr
import scipy.sparse.csc
import pickle
# from gensim import models
import sys, os
from relativeness_analysis.utils import *
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from xgboost import XGBClassifier
class LogisticRegression(object):
def __init__(self, label_dict, signature, learning_rate='optimal', penalty='l1', alpha=1e-3, eta0=0.0, class_weight='balanced', thres=0.5):
self.label_dict = label_dict
self.signature = signature
self.lr = learning_rate
self.penalty = penalty
self.alpha = alpha
self.eta0 = eta0
self.class_weight = class_weight
self.thres = thres
self.loss = 'log'
self.clf = None
def set_signature(self, new_signature):
self.signature = new_signature
@staticmethod
def train_test_split(X, Y, train_ratio=0.8):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray)):
X = np.array(X, copy=False)
N = X.shape[0]
N_train = int(N*train_ratio)
N_test = N - N_train
assert N_train > 0 and N_test > 0, '训练集或测试集必须至少有一个样本'
idx = np.random.permutation(N)
return (X[idx[:N_train]], Y[idx[:N_train]]), (X[idx[N_train:]], Y[idx[N_train:]])
def train(self, X, Y, save_to=None, initial_coef=None, initial_intercept=None, verbose=False):
assert len(self.label_dict) == 2, 'It should have exactly two classes.'
if isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray):
data = X
else:
data = np.array(X, copy=False)
if isinstance(Y, scipy.sparse.csr.csr_matrix):
label = Y.todense()
else:
label = np.array(Y, copy=False)
if len(np.unique(label)) == 1:
print('Only contains one label, training stopped.')
return
# print('Training...')
sgd = SGDClassifier(loss=self.loss, penalty=self.penalty, alpha=self.alpha, class_weight=self.class_weight, \
learning_rate=self.lr, eta0=self.eta0, verbose=verbose)
        if initial_coef is not None or initial_intercept is not None:
            self.clf = sgd.fit(data, label, coef_init=initial_coef, intercept_init=initial_intercept)
        else:
            self.clf = sgd.fit(data, label)
# print('Finished.')
if save_to:
# print('Saving model...')
self.save(save_to)
def save(self, save_to):
file_name = save_to + ('-%s.lr' % self.signature)
with open(file_name, 'wb') as f:
pickle.dump((self.clf, self.label_dict, self.signature), f)
@staticmethod
def load(file_path):
with open(file_path, 'rb') as f:
clf, label_dict, signature = pickle.load(f)
lr = LogisticRegression(label_dict, signature)
lr.clf = clf
return lr
def predict(self, X, return_real_label=False):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray)):
X = np.array(X, copy=False)
if self.clf and X.shape[0] > 0:
if len(X.shape) == 1:
X = [X]
prob = self.clf.predict_proba(X)
label = np.ones((prob.shape[0],))
label[prob[:,0] >= self.thres] = 0
if return_real_label:
return [self.label_dict[l] for l in label]
else:
return label
else:
if not self.clf:
print('模型还没训练,请先训练模型')
else:
print('数据不能为空')
def predict_proba(self, X):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray)):
X = np.array(X, copy=False)
if self.clf and X.shape[0] > 0:
if len(X.shape) == 1:
X = [X]
prob = self.clf.predict_proba(X)
return prob
else:
if not self.clf:
print('模型还没训练,请先训练模型')
else:
print('数据不能为空')
def report(self, X, Y, verbose=True):
if not(isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray)):
X = np.array(X, copy=False)
if isinstance(Y, scipy.sparse.csr.csr_matrix):
Y = Y.todense()
else:
Y = np.array(Y, copy=False)
N = X.shape[0]
assert len(Y) == N
if not self.clf:
print('模型还没训练,请先训练模型')
return
else:
predicted_Y = self.predict(X)
score = self.compute_score(Y, predicted_Y)
recall = score['recall']
precision = score['precision']
F1 = score['F1']
if verbose:
for i in range(N):
print('\tData id@%d, real label: %s, predicted label: %s' % \
(i, self.label_dict[Y[i]], self.label_dict[predicted_Y[i]]))
print('Correct rate: %s' % (np.mean(predicted_Y == Y)))
for key in self.label_dict:
print('Article num of label %s on training dataset: %s, recall: %.3f, precision: %.3f, F1: %.3f' % \
(self.label_dict[key], np.sum(Y == key), recall[key], precision[key], F1[key]))
def compute_score(self, Y, predicted_Y):
recall = {}
precision = {}
F1 = {}
if isinstance(Y, scipy.sparse.csr.csr_matrix):
Y = Y.todense()
else:
Y = np.array(Y, copy=False)
if isinstance(predicted_Y, scipy.sparse.csr.csr_matrix):
predicted_Y = predicted_Y.todense()
else:
predicted_Y = np.array(predicted_Y, copy=False)
for key in self.label_dict:
N_key = np.sum(Y == key)
if N_key == 0:
recall[key] = 1.0
else:
recall[key] = np.sum((Y == key)*(predicted_Y == key))/(N_key+0.0)
N_predicted_pos = np.sum(predicted_Y == key)
if N_predicted_pos == 0:
precision[key] = 1.0
else:
precision[key] = np.sum((Y == key)*(predicted_Y == key))/(N_predicted_pos+0.0)
F1[key] = 2*recall[key]*precision[key]/(recall[key]+precision[key])
return {'recall': recall, 'precision': precision, 'F1': F1}
class xgboost(object):
def __init__(self, label_dict, signature, lr=0.1, reg_alpha=0, reg_lambda=1, objective='binary:logitraw', \
with_sample_weight=True, subsample=1, min_child_weight=1, scale_pos_weight=1, thres=0.5):
self.lr = lr
self.label_dict = label_dict
self.signature = signature
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.objective = objective
self.with_sample_weight = with_sample_weight
self.min_child_weight = min_child_weight
self.scale_pos_weight = scale_pos_weight
self.thres = thres
self.clf = None
def set_signature(self, new_signature):
self.signature = new_signature
def train(self, X, Y, save_to=None):
assert len(self.label_dict) == 2, 'It should have exactly two classes.'
if isinstance(X, scipy.sparse.csr.csr_matrix):
data = X.tocsc()
elif isinstance(X, np.ndarray):
data = X
else:
data = np.array(X, copy=False)
if isinstance(Y, scipy.sparse.csr.csr_matrix):
label = Y.todense()
else:
label = np.array(Y, copy=False)
if len(np.unique(label)) == 1:
print('Only contains one label, training stopped.')
return
N_0 = np.sum(label == 0)
N_1 = np.sum(label == 1)
w_0 = (N_0 + N_1) / (2. * N_0)
w_1 = (N_0 + N_1) / (2. * N_1)
# w_0 = w_0 * 1.3
# w_1 = w_1 / 1.1
# print(w_0, w_1)
# print('Training...')
self.clf = XGBClassifier(reg_alpha=self.reg_alpha, reg_lambda=self.reg_lambda, objective=self.objective, \
min_child_weight=self.min_child_weight, scale_pos_weight=self.scale_pos_weight, learning_rate=self.lr)
if self.with_sample_weight:
self.clf.fit(data, label, sample_weight=[w_0 if l == 0 else w_1 for l in label])
else:
self.clf.fit(data, label)
# print('Finished.')
if save_to:
# print('Saving model...')
self.save(save_to)
def save(self, save_to):
file_name = save_to + ('-%s.xgb' % self.signature)
with open(file_name, 'wb') as f:
pickle.dump((self.clf, self.label_dict, self.signature), f)
@staticmethod
def load(file_path):
with open(file_path, 'rb') as f:
clf, label_dict, signature = pickle.load(f)
xgb = xgboost(label_dict, signature)
xgb.clf = clf
return xgb
def predict(self, X, return_real_label=False):
prob = self.predict_proba(X)
label = np.ones((prob.shape[0],))
label[prob[:,0] >= self.thres] = 0
if return_real_label:
return [self.label_dict[l] for l in label]
else:
return label
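    # Threshold semantics (sketch): predict_proba returns [P(class 0), P(class 1)] per row,
    # and a row is labelled 0 ('保留' under the usual label_dict) whenever P(class 0) >= self.thres:
    #
    #   prob = np.array([[0.62, 0.38], [0.31, 0.69]])
    #   # thres=0.5 -> labels [0, 1];  thres=0.3 -> labels [0, 0]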
def predict_proba(self, X):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray) or isinstance(X, scipy.sparse.csc.csc_matrix)):
X = np.array(X, copy=False)
if isinstance(X, scipy.sparse.csr.csr_matrix):
X = X.tocsc()
if self.clf and X.shape[0] > 0:
if len(X.shape) == 1:
X = [X]
prob = self.clf.predict_proba(X)
return prob
else:
if not self.clf:
print('模型还没训练,请先训练模型')
else:
print('数据不能为空')
def report(self, X, Y, verbose=True):
if isinstance(Y, scipy.sparse.csr.csr_matrix):
Y = Y.todense()
else:
Y = np.array(Y, copy=False)
N = X.shape[0]
assert len(Y) == N
if not self.clf:
print('模型还没训练,请先训练模型')
return
else:
predicted_Y = self.predict(X)
score = self.compute_score(Y, predicted_Y)
recall = score['recall']
precision = score['precision']
F1 = score['F1']
if verbose:
for i in range(N):
print('\tData id@%d, real label: %s, predicted label: %s' % \
(i, self.label_dict[Y[i]], self.label_dict[predicted_Y[i]]))
print('Correct rate: %s' % (np.mean(predicted_Y == Y)))
for key in self.label_dict:
print('Article num of label %s on training dataset: %s, recall: %.3f, precision: %.3f, F1: %.3f' % \
(self.label_dict[key], np.sum(Y == key), recall[key], precision[key], F1[key]))
@staticmethod
def train_test_split(X, Y, train_ratio=0.8):
if not (isinstance(X, scipy.sparse.csr.csr_matrix) or isinstance(X, np.ndarray) or isinstance(X, scipy.sparse.csc.csc_matrix)):
X = np.array(X, copy=False)
N = X.shape[0]
N_train = int(N*train_ratio)
N_test = N - N_train
assert N_train > 0 and N_test > 0, '训练集或测试集必须至少有一个样本'
idx = np.random.permutation(N)
return (X[idx[:N_train]], Y[idx[:N_train]]), (X[idx[N_train:]], Y[idx[N_train:]])
def compute_score(self, Y, predicted_Y):
recall = {}
precision = {}
F1 = {}
if isinstance(Y, scipy.sparse.csr.csr_matrix):
Y = Y.todense()
else:
Y = np.array(Y, copy=False)
if isinstance(predicted_Y, scipy.sparse.csr.csr_matrix):
predicted_Y = predicted_Y.todense()
else:
predicted_Y = np.array(predicted_Y, copy=False)
for key in self.label_dict:
N_key = np.sum(Y == key)
if N_key == 0:
recall[key] = 1.0
else:
recall[key] = np.sum((Y == key)*(predicted_Y == key))/(N_key+0.0)
N_predicted_pos = np.sum(predicted_Y == key)
if N_predicted_pos == 0:
precision[key] = 1.0
else:
precision[key] = np.sum((Y == key)*(predicted_Y == key))/(N_predicted_pos+0.0)
F1[key] = 2*recall[key]*precision[key]/(recall[key]+precision[key])
return {'recall': recall, 'precision': precision, 'F1': F1}
def test():
# file_path = 'D:\\学习\\研究生\\文本挖掘项目\\舆情正负面判别\\舆情标引信息-20170104.xlsx'
file_path = 'test.xlsx'
idx_dict = {}
idx_dict['content_begin_with'] = 1
    idx_dict['article_col'] = 1 # column of the article content in the excel file (0-based)
    idx_dict['title_col'] = 0 # column of the title (0-based)
    idx_dict['relativeness_col'] = 5 # column of the relevance label (0-based)
    idx_dict['topic_col'] = 4 # column of the company (0-based)
vocab = Vocabulary(signature=123, name='vocab', min_word_len=2)
dp = data_processor(file_path, config=idx_dict, for_test=False, transformer_norm='l2')
processed_data, processed_label, processed_label_dict = dp.preprocess(_all=True)
dp.update_vocab(vocab, processed_data)
# # shuffle the vocabulary, this does not affect the results that much
# dp.vocab.shuffle()
transformed_data = dp.transform(vocab, processed_data)
vocab.save('vocab')
LR = LogisticRegression(label_dict=processed_label_dict['all'], signature=vocab.signature, thres=0.4)
(X_train, Y_train), (X_test, Y_test) = LR.train_test_split(transformed_data['all'], processed_label['all'], train_ratio=0.8)
LR.train(X_train, Y_train, save_to='test_clf')
print('On training dataset:')
LR.report(X_train, Y_train)
print('On test dataset:')
LR.report(X_test, Y_test)
Y_test_predicted = LR.predict(X_test)
print(LR.compute_score(Y_test, Y_test_predicted))
print(LR.clf.coef_.shape, LR.clf.intercept_.shape)
if __name__ == '__main__':
test() | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/classifier2.py | classifier2.py |
from relativeness_analysis.vocabulary import Vocabulary
from relativeness_analysis.classifier2 import xgboost
from relativeness_analysis.utils import data_processor
import time, os
import numpy as np
# import pandas as pd
import cx_Oracle
import pickle
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
class TrainManager(object):
def __init__(self):
self.signature = int(time.time())
def read_sql(self, sql, con):
# print('Fetching data from remote sql...')
# raw_data = pd.read_sql_query(sql, con)
# with open('data.pd', 'wb') as f:
# pickle.dump(raw_data, f)
# raw_data.to_excel('raw_data.xlsx')
# data = {}
# for record in raw_data.iterrows:
# company = record['tid'].strip()
# article = record['content_no_tag'].strip()
# title = record['title'].strip()
# relevant = record['relevance'].strip()
# emotion = '非负' # record['emotion'].strip()
# data[company] = data.get(company, []) + [(title+'。'+article, relevant, emotion)]
# conn.close()
# return data
cursor = con.cursor()
cursor.execute(sql)
data = {}
def convert(col):
if isinstance(col, cx_Oracle.LOB):
return col.read().decode('utf-8')
else:
return col
for record in cursor:
company = convert(record[2])
title = convert(record[0])
article = convert(record[1])
relevant = convert(record[3])
if article is None:
continue
else:
if relevant is None:
relevant = 1
if title is not None:
title = title.strip()
else:
title = ''
article = article.strip()
relevant = '保留' if relevant == 0 else '删除'
emotion = '非负'
data[company] = data.get(company, []) + [(title+'。'+article, relevant, emotion)]
con.close()
return data
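    # Sketch of the structure returned by read_sql:
    #
    #   data == {'P3745': [('标题。正文...', '保留', '非负'), ...], ...}
    #
    # keys are the 'P' + tid company ids selected by the SQL query, values are
    # (title + '。' + article, relevance label, sentiment) tuples.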
def make_dirs(self, path):
dir_path = os.path.join(os.getcwd(), path)
if not os.path.isdir(dir_path):
os.makedirs(dir_path)
def train(self, sql, con, _all=False, _emotion=False, config=None, transformer='tf', transformer_norm='l2', save_to_folder=None, \
lr=0.1, reg_alpha=0, reg_lambda=1, objective='binary:logitraw', with_sample_weight=True, subsample=1, \
min_child_weight=1, scale_pos_weight=1, thres=0.5, train_ratio=0.8):
print('Fetching data from remote SQL...')
data = self.read_sql(sql, con)
print('Done!')
dp = data_processor(data, transformer=transformer, transformer_norm=transformer_norm)
processed_data, processed_label, processed_label_dict = dp.preprocess(_all=_all, _emotion=_emotion)
# print(processed_label)
for company in processed_data:
if len(processed_label[company]) == 0:
print('%s 没有数据!跳过该类!' % company)
continue
try:
dp.reset()
vocab = Vocabulary(signature=self.signature, name='vocab-%s'%company, min_word_len=2)
dp.update_vocab(vocab, processed_data[company])
print('%s, after updating, %s' % (company, vocab.get_size()))
transformed_data = dp.transform(vocab, processed_data[company], processed_label[company])
self.make_dirs(save_to_folder)
vocab_save_to = os.path.join(save_to_folder, 'vocab-%s' % company)
vocab.save(vocab_save_to) # vocab-all-1491195238.voc
xgb = xgboost(processed_label_dict[company], self.signature, lr=lr, reg_alpha=reg_alpha, reg_lambda=reg_lambda, \
objective=objective, with_sample_weight=with_sample_weight, subsample=subsample, thres=thres,\
min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight)
(X_train, Y_train), (X_test, Y_test) = xgb.train_test_split(transformed_data, processed_label[company], train_ratio=train_ratio)
print('Training on %s' % company)
if reg_alpha > 0 and reg_lambda > 0:
penalty = 'l1+l2'
elif reg_alpha > 0:
penalty = 'l1'
elif reg_lambda > 0:
penalty = 'l2'
else:
penalty = 'None'
# xgboost-all-tf-l1+l2-l2-0.5-1496718804.xgb
clf_save_to = os.path.join(save_to_folder, 'xgboost-%s-%s-%s-%s-%s' % (company, transformer, penalty, transformer_norm, thres))
xgb.train(X_train, Y_train, save_to=clf_save_to)
print('On training dataset:')
xgb.report(X_train, Y_train, verbose=False)
print('On test dataset:')
xgb.report(X_test, Y_test, verbose=False)
except Exception as e:
print(e)
# raise e
def test(connection_string = 'cis/cis_zzsn9988@118.190.174.96:1521/orcl', begin_date = '2017-03-01', end_date = '2017-07-13'):
'''
    begin_date: start date of the training window
    end_date: end date of the training window
'''
# company list
# company_list = ['3745', '3089', '3748', '2783', '3440']
company_list = ['3745,3089,3748,2783,3440', '3741,3420,3319']
    # model parameters
    save_to_folder = './tmp' # directory for the training artifacts (classifiers and vocabularies)
    _all = False # whether to train a single joint model across companies; True means do not distinguish companies
    _emotion = False # whether to train separate models for positive/negative sentiment; True means split by sentiment
thres = 0.5
lr = 0.1
reg_alpha = 0
reg_lambda = 1
objective = 'binary:logitraw'
with_sample_weight = True
subsample = 1
min_child_weight = 1
scale_pos_weight = 1
for company in company_list:
ora_conn = cx_Oracle.connect(connection_string)
sql_query = '''select b.title,b.content_no_tag,'P'||t.tid as tid,t.delflag as relevance from cis_ans_basedata b inner join cis_ans_basedata_type t on (b.id=t.bid and t.delflag is not null)
where (b.orientation !=2 or b.orientation is null)
and t.tid in (%s)
and B.Publish_Date > '%s' and B.Publish_Date < '%s' ''' % (company, begin_date, end_date)
# print(sql_query)
tm = TrainManager()
tm.train(sql_query, ora_conn, _all=_all, _emotion=_emotion, save_to_folder=save_to_folder, lr=lr, reg_alpha=reg_alpha, \
reg_lambda=reg_lambda, objective=objective, with_sample_weight=with_sample_weight, subsample=subsample, \
thres=thres, min_child_weight=min_child_weight, scale_pos_weight=scale_pos_weight)
if __name__ == '__main__':
test() | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relativeness_analysis/manager.py | manager.py |
#please refer to https://hub.tensorflow.google.cn/google/bert_chinese_L-12_H-768_A-12/1
import sys
sys.path.insert(0, 'D:/peking_code/code_python/Bert201912/bert-master')
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import bert
from bert import run_classifier
from bert import optimization
from bert import tokenization
# open question: how to reduce the dimensionality of the pooled output
#############################################################################################
#how the input preprocessing should be done to retrieve the input ids, masks, and segment ids:
def create_tokenizer_from_hub_module(bert_model_hub):
#with tf.Graph().as_default():
bert_module = hub.Module(bert_model_hub)
tokenization_info = bert_module(signature="tokenization_info", as_dict=True)
with tf.compat.v1.Session() as sess:
vocab_file, do_lower_case = sess.run([tokenization_info["vocab_file"],
tokenization_info["do_lower_case"]])
return tokenization.FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
def convert_text_to_features(model,text_): #created by zzb 20200615
tokenizer = create_tokenizer_from_hub_module(model)
example_ = run_classifier.InputExample(guid='',text_a=text_, label='A')
MAX_SEQ_LENGTH=128
input_feature = run_classifier.convert_single_example(0, example_, ['A','B'], MAX_SEQ_LENGTH, tokenizer)
features1 = []
features2 = []
features3 = []
features1.append(input_feature.input_ids)
features2.append(input_feature.input_mask)
features3.append(input_feature.segment_ids)
bert_inputs = dict(
input_ids=tf.convert_to_tensor(np.array(features1)),
input_mask=tf.convert_to_tensor(np.array(features2)),
segment_ids=tf.convert_to_tensor(np.array(features3)))
return bert_inputs
#############################################################################################
def text2vec(text_):
model_ = "../embeding"
#model_ = "https://hub.tensorflow.google.cn/google/bert_chinese_L-12_H-768_A-12/1"
bert_inputs = convert_text_to_features(model_,text_)
hub_layer = hub.Module(model_, trainable=True)
_output = hub_layer(bert_inputs, signature="tokens", as_dict=True)
with tf.compat.v1.Session() as sess:
tf.compat.v1.global_variables_initializer().run()
pooled_output = sess.run(_output["pooled_output"]) #The pooled_output is a [batch_size, hidden_size] Tensor
#print(type(pooled_output[0]))
return pooled_output[0].tolist() #size: hidden_size
if __name__ == '__main__':
print("Version: ", tf.__version__)
print("Eager mode: ", tf.executing_eagerly())
print("Hub version: ", hub.__version__)
t = "要坚持以习近平新时代中国特色社会主义思想为指导,深入学习贯彻党的十九届四中全会精神"
print(text2vec(t))
# =============================================================================
#model = "https://storage.googleapis.com/tfhub-modules/google/bert_chinese_L-12_H-768_A-12/1.tar.gz"
#model = "https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/1"
#============================================================================= | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relation_extraction/hub_TextEmbedding.py | hub_TextEmbedding.py |
from flask import Flask, g, render_template, flash, redirect, url_for, request, abort, session
from werkzeug.utils import secure_filename
import time
import os
#import sys
#sys.path.append("./src")
#os.chdir(os.path.join(os.getcwd(),'src'))
#import rel_prediction
import traceback
from relation_extraction.preprocessing_xls import paragraph_sectioning,preprocessing_xls_4train,preprocessing_xls_4pred
from relation_extraction.rel_train import train_
from relation_extraction.rel_prediction import prediction_,left_bag_of_words_featurizer,simple_bag_of_words_featurizer,right_bag_of_words_featurizer,get_high_prob_excel
DEBUG = False
PORT = 8017
HOST = '0.0.0.0'
app = Flask(__name__)
@app.route('/', methods=('GET', 'POST'))
def index():
return ''
# def index():
# query_form = QueryForm()
# if request.method == 'POST':
# if query_form.validate_on_submit():
# query_file = request.files.get('query', None)
# query_file_name = secure_filename(str(int(time.time()*100)) + '_' + query_file.filename)
# path = os.path.join(app.config['UPLOAD_FOLDER'], query_file_name)
# query_file.save(path)
# session['query_file'] = query_file_name
# return redirect(url_for('show_result'))
# return render_template('index.html', query_form=query_form)
@app.route('/proces_train_xls/', methods=('GET', 'POST'))
def preprocessing_4train():
xls_name = str(request.form.get('xls_name'))
print(xls_name)
if not os.path.isfile(xls_name):
return 'xls文件不存在!'
try:
preprocessing_xls_4train(xls_name)
except Exception as e:
print(e)
return '0'
return '1'
@app.route('/train/', methods=('GET', 'POST'))
def get_train():
try:
train_()
except Exception as e:
print(e)
return '0'
return '1'
from flask import jsonify
import json
def get_return_info(_message, _prob_sorted=None, _text=None):
return json.dumps({'message': _message, 'prob': _prob_sorted, 'min_text': _text},ensure_ascii=False)
@app.route('/predict/', methods=('GET', 'POST'))
def get_prediction():
e1 = request.form.get('e1')
e2 = request.form.get('e2')
test_text = str(request.form.get('test_text'))
# list = {'a1':0.9,'a2':0.8}
# content = {'e1': list, 'e2': e2, 'text': test_text}
# return jsonify(content)
if (e1 is None) and (e2 is not None):
return get_return_info('输入格式应是:e1=XX&e2=YY&test_text=ZZ, 且句中应有左右实体名')
if (e2 is None) and (e1 is not None):
return get_return_info('输入格式应是:e1=XX&e2=YY&test_text=ZZ, 且句中应有左右实体名')
if (e1 is None):
print('左实体名 is None\n')
if (e2 is None):
print('右实体名 is None\n')
#test_text = str(request.args.get('test_text'))
#print(rel_prediction.OLD_URL)
# print(type(test_text),test_text)
#test_text = str('郑新聪 国资国企改革发展 要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,牢牢把握国有企业改革的正确方向。李南轩摄学习宣传贯彻党的十九大精神是全党全国当前和今后一个时期的首要政治任务。如何学习贯彻好党的十九大精神,习近平总书记在十九届中央政治局第一次集体学习时,提出要在学懂弄通做实上下功夫,号召“全党来一个大学习”。日前,福建全省各个领域、各条战线、各行各业兴起习近平新时代中国特色社会主义思想“大学习”热潮。福建省副省长郑新聪前些时候深入三钢集团福建罗源闽光钢铁有限责任公司一线,开展习近平新时代中国特色社会主义思想宣讲。宣讲会前,郑新聪一行深入到罗源闽光公司炼钢厂,沿着参观通道边走边看边听汇报,详细了解罗源闽光公司在绿色发展、技术指标、科技创新、经济效益等方面情况。在随后的宣讲会上,郑新聪以“深入学习习近平新时代中国特色社会主义思想深化和推动国有企业改革发展”为党课主题,分别从习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述、新时代国资国企改革发展肩负新的历史使命、坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展三个方面作了深刻阐释。就下一步如何推进新时代国资国企改革发展,郑新聪要求,要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,深刻认识深化国有企业改革的重大意义,牢牢把握国有企业改革的正确方向。以新发展理念推动国企发展宣讲中,郑新聪与参会人员共同学习回顾了习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述:目前,我国经济已由高速增长阶段转向高质量发展阶段。推动高质量发展是保持经济持续健康发展的必然要求;推动高质量发展是适应我国社会主要矛盾变化的必然要求;推动高质量发展是遵循经济规律发展的必然要求。此外,实现高质量发展必须坚持和践行新发展理念。发展是解决我国一切问题的基础和关键,发展必须是科学发展,必须坚定不移贯彻创新、协调、绿色、开放、共享的发展理念。新发展理念是习近平新时代中国特色社会主义经济思想的主要内容,在推进我国经济高质量发展过程中,必须坚定不移贯彻。为推动我国经济高质量发展,我们要坚持适应把握引领经济发展新常态,要把推进供给侧结构性改革作为经济工作的主线,要建设现代化经济体系。针对以上论述,郑新聪强调,全体成员要把握领会习近平新时代中国特色社会主义思想精神,特别是关于深化和推动国有企业改革发展方面,以此推动国企高质量发展。新时代国资国企肩负新使命郑新聪指出,党的十九大提出“要完善各类国有资产管理体制,改革国有资本授权经营体制,加快国有经济布局优化、结构调整、战略性重组,促进国有资产保值增值,推动国有资本做强做优做大,有效防止国有资产流失;深化国有企业改革,发展混合所有制经济,培育具有全球竞争力的世界一流企业。”这“九句话、109字”为国资国企改革发展指明了前进的方向,是我们推进下一步工作的重要行动指南。郑新聪表示,首先要深刻认识深化国有企业改革的重大意义。国有企业是推进国家现代化、保障人民共同利益的重要力量,是党和国家事业发展的重要物质基础和政治基础。深化国有企业改革是坚持和发展中国特色社会主义的必然要求,深化国有企业改革是实现“两个一百年”奋斗目标的重大任务,深化国有企业改革是推动我国经济持续健康发展的客观要求。在明确国企深化改革的重要性后,郑新聪强调,下一步要牢牢把握国有企业改革的正确方向。首先,要坚持和完善基本经济制度。必须毫不动摇巩固和发展公有制经济,毫不动摇鼓励、支持、引导非公有制经济发展。坚持公有制主体地位,发挥国有经济主导作用,做强做优做大国有企业。其次,要坚持社会主义市场经济改革方向。遵循市场经济规律和企业发展规律,坚持政企分开、政资分开、所有权与经营权分离,坚持权利、义务、责任相统一,促使国有企业真正成为独立市场主体。再者,坚持以解放和发展生产力为标准。始终把握有利于国有资产保值增值、有利于提高国有经济竞争力、有利于放大国有资本功能的要求,着力破除束缚国有企业发展的体制机制障碍,发挥国有企业各类人才积极性、主动性、创造性。同时,坚持增强活力与强化监管相结合。增强活力是搞好国有企业的本质要求,强化监管是搞好国有企业的重要保障,必须处理好两者关系,切实做到有机统一。此外,要更加坚持党对国有企业的领导。坚持党对国有企业的领导是重大政治原则,必须一以贯之。2016年10月,习近平在全国国有企业党的建设工作会议上指出:中国特色现代国有企业制度,“特”就特在把党的领导融入公司治理各环节。党建写入章程真正融入国企中心工作,章程明确了党组织在公司法人治理结构中的法定地位,特别是党组织在决策、执行、监督各环节的权责和工作方式。值得一提的是,郑新聪充分肯定三钢集团公司党委探索出的党支部密切联系群众的“五小工作法”,通过为群众讲清小道理、解决小问题、办好小事情、选树小典型、开展小活动,实现党建工作与生产经营、职工生活有机融合。随后,郑新聪指出,省属企业要扎实做好新时期深化国有企业改革的重点任务。“省属企业要完善各类国有资产管理体制。建立健全各类国有资产监督法律法规体系。以管资本为主深化国有资产监管要加快国有经济布局优化、结构调整、战略性重组。”郑新聪指出,省属企业要围绕服务国家战略,推动国有经济向关系国家安全、国民经济命脉和国计民生的重要行业和关键领域、重点基础设施集中。加快处置低效无效资产,淘汰落后产能,剥离办社会职能,解决历史遗留问题,提高国有资本配置效率。日前,国务院国资委下发了《关于加强国有企业资产负债约束的指导意见》是落实党的十九大精神,推动国有企业降杠杆、防范化解国有企业债务风险的重要举措,促使高负债国有企业资产负债率尽快回归合理水平。郑新聪指出,近年来,福建省省属企业也呈现一批改革发展典型。三钢集团通过兼并重组整合区域资源,集团钢产量成功突破1100万吨,真正步入大型钢铁企业行列。特别是2014年重组三金钢铁有限公司,形成了现在的罗源闽光钢铁公司,通过优化机制,改善工艺,2016年扭亏为盈,2018年18月份盈利10.74亿元,资产负债率从90降至目前的40,让一个濒临倒闭的企业成为一个福州区域明星企业,成为钢铁行业兼并重组成功典范。星网锐捷旗下凯米网络科技有限公司积极探索商业模式创新,向KTV提供“管理、流量、内容、广告”四大核心价值,构建互联网聚会娱乐新生态,用户超7500万,成为行业独角兽。发展混合所有制经济亦是新时期深化国有企业改革的重点任务。积极推进主业处于充分竞争行业和领域的商业类国有企业混合所有制改革,有效探索重点领域混合所有制改革,在引导子公司层面改革的同时探索在集团公司层面推进混合所有制改革。大力推动国有企业改制上市。稳妥有序开展国有控股混合所有制企业员工持股。此外,形成有效制衡的公司法人治理结构和灵活高效的市场化经营机制,加强监管有效防止国有资产流失。以国有资产保值增值、防止流失为目标,加强对企业关键业务、改革重点领域、国有资本运营重要环节的监督。建立健全国有企业重大决策失误和失职、渎职责任追究倒查机制。加强审计监督、纪检监督、巡查监督,形成监督合力。郑新聪表示,培育具有全球竞争力的世界一流企业也是目前省属企业的重点任务之一。支持国有企业深入开展国际化经营,在“一带一路”建设中推动优势产业走出去。')
#test_text = str('郝鹏 太钢精带公司 国务院国资委党委书记、主任郝鹏到太钢精带公司调研')
try:
test_text7,min_text,original_text = paragraph_sectioning(test_text,e1,e2)
if len(test_text7) < 20:
return get_return_info(test_text7)
message_,prob_text,prob_dict_sorted = prediction_(test_line = test_text7)
except:
return get_return_info(traceback.print_exc())
#content_ = {'message': message_, 'prob': prob_dict_sorted, 'min_text': min_text}
return get_return_info(message_, prob_dict_sorted, min_text)
# if (len(min_text) + 10) > len(original_text):
# return result
# return result + '<br> ' + min_text
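# Hedged usage sketch for the /predict/ endpoint above (port follows the PORT constant
# in this file; entity names and text are placeholders):
#
#   import requests
#   r = requests.post('http://127.0.0.1:8017/predict/',
#                     data={'e1': '郑新聪', 'e2': '国资国企改革发展', 'test_text': '...原文段落...'})
#   print(r.json())   # {'message': ..., 'prob': ..., 'min_text': ...}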
@app.route('/predict_mass/', methods=('GET', 'POST'))
def get_predict_mass():
xls_name = str(request.form.get('xls_name'))
try:
tsv_fullname = preprocessing_xls_4pred(xls_name)
if not os.path.isfile(tsv_fullname):
return '处理预测文件tsv不存在!'
result = prediction_(filename_ = tsv_fullname)
except:
return traceback.print_exc()
return result
@app.route('/predict_high_prob/', methods=('GET', 'POST'))
def get_high_predict_mass():
xls_name = str(request.form.get('xls_name'))
prob_threshold_ = float(str(request.form.get('prob_threshold')))
if not os.path.isfile(xls_name):
return '被检索的xls文件不存在!'
try:
xls_fullname = get_high_prob_excel(predicted_result_file = xls_name,prob_threshold = prob_threshold_)
except:
return traceback.print_exc()
return xls_fullname
app.run(debug=DEBUG, host=HOST, port=PORT) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relation_extraction/app.py | app.py |
import jieba
import re
import os
import xlwt
# load the stopword list
filepath = os.path.dirname(os.path.realpath(__file__))
stop = open(os.path.join(filepath, './user_data/stop.txt'), 'r+', encoding='utf-8')
stopword = stop.read().split("\n")
# 最长句子长度
word_len = 600
# 判断汉字个数
def han_number(char):
number = 0
for item in char:
if 0x4E00 <= ord(item) <= 0x9FA5:
number += 1
return number
# split text into sentences and regroup them into chunks of roughly word_len Chinese characters
def cut_j(text_):
text = re.sub('([。!?\?])([^”’])', r"\1\n\2", text_)
text = re.sub('([。!?\?][”’])([^,。!?\?])', r'\1\n\2', text)
text = text.rstrip().split("\n")
#k = math.floor(han_number(text_)/600)
j = 0
t = ['']
for i in text:
if han_number(t[j])<word_len:
t[j] = t[j]+i
else:
t.append('')
j = j+1
return t
# find the closest pair of occurrences of x and y inside z
def lenc(x,y,z):
xl = han_number(x)
yl = han_number(y)
xx = [10000]
yy = [20000]
min_ = 1000
for i in range(han_number(z)-max(xl,yl)):
if z[i:i+xl] == x:
xx.append(i)
xx.append(i+xl)
if z[i:i+yl] == y:
yy.append(i)
yy.append(i+yl)
# print(xx,yy)
a = 0
b = 0
for i in xx:
for j in yy:
if min_>abs(i-j):
a = i
b = j
min_ = abs(i-j)
if a>b:
return min_,y,x,b,a
else:
return min_,x,y,a,b
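# Sketch of lenc's behaviour: it slides over z using the Chinese-character counts of x and y
# as window widths, records the start/end offsets of every occurrence, and returns the closest
# pair as (distance, left_entity, right_entity, left_pos, right_pos), e.g.
#
#   >>> lenc('北京', '上海', '北京到上海的高铁')
#   (1, '北京', '上海', 2, 3)
#
# Both start and end offsets are considered, so the distance is measured between the closest
# boundaries of the two mentions, and the pair is returned in left-to-right order.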
def keyword(entity_1,entity_2,text_,ii=0,jj=0):
key = {'left':[],'mention_1':[entity_1],'middle':[],'mention_2':[entity_2],'right':[]}
key['left'] = list(jieba.cut(text_[:ii-len(entity_1)]))
key['middle'] = list(jieba.cut(text_[ii:jj]))
key['right'] = list(jieba.cut(text_[jj+len(entity_2):]))
print('关键信息提取--------------------------')
print(key)
return key
###########################################3
def k(text,x='',y=''):
min_txt = ['0',1000]
if x == '':
p = 0
k = list(jieba.cut(text))
d = {}
for i in k:
if i in stopword:
continue
if i in d:
d[i] += 1
else:
d[i] = 1
m1 = ['1',1]
m2 = ['2',0]
for i in d:
if int(d[i])>=m1[1]:
m2[0] = m1[0]
m2[1] = m1[1]
m1[1] = d[i]
m1[0] = i
elif d[i]>m2[1]:
m2[0] = i
m2[1] = d[i]
else:
m1 = [x,0]
m2 = [y,0]
jl = cut_j(text)
keyword_ = []
for str_ in jl:
p,xx,yy,ii,jj = lenc(m1[0],m2[0],str_)
if min_txt[1]>p:
min_txt[0] = str_
min_txt[1] = p
keyword_ = [xx,yy,ii,jj]
print('关键词---------------------------------------')
print(keyword_[0],keyword_[1])
print('这句话两个词相距最近-------------------------')
print(min_txt)
keyword(keyword_[0],keyword_[1],min_txt[0],keyword_[2],keyword_[3])
return min_txt,m1,m2
# =============================================================================
# def position_(text,x='',y=''):
# min_txt = ['0',1000]
# if x == '':
# p = 0
# k = list(jieba.cut(text))
# d = {}
# for i in k:
# if i in stopword:
# continue
# if i in d:
# d[i] += 1
# else:
# d[i] = 1
# m1 = ['1',1]
# m2 = ['2',0]
# for i in d:
# if int(d[i])>=m1[1]:
# m2[0] = m1[0]
# m2[1] = m1[1]
# m1[1] = d[i]
# m1[0] = i
# elif d[i]>m2[1]:
# m2[0] = i
# m2[1] = d[i]
# else:
# m1 = [x,0]
# m2 = [y,0]
# jl = cut_j(text)
# keyword_ = []
# for str_ in jl:
# p,xx,yy,ii,jj = lenc(m1[0],m2[0],str_)
# if min_txt[1]>p:
# min_txt[0] = str_
# min_txt[1] = p
# keyword_ = [xx,yy,ii,jj]
# print('关键词: ',xx,yy,'出现在下面这段话,且距离最近:\n')
#
# print(min_txt)
#
# return ii,jj
#
# =============================================================================
def position_mintxt(text,x='',y=''):
min_txt = ['0',1000]
if x == '':
p = 0
k = list(jieba.cut(text))
d = {}
for i in k:
if i in stopword:
continue
if i in d:
d[i] += 1
else:
d[i] = 1
m1 = ['1',1]
m2 = ['2',0]
for i in d:
if int(d[i])>=m1[1]:
m2[0] = m1[0]
m2[1] = m1[1]
m1[1] = d[i]
m1[0] = i
elif d[i]>m2[1]:
m2[0] = i
m2[1] = d[i]
else:
m1 = [x,0]
m2 = [y,0]
keyword_ = []
if han_number(text)<word_len:
print(m1[0],m2[0])
p,xx,yy,ii,jj = lenc(m1[0],m2[0],text)
keyword_ = [xx,yy,ii,jj]
print(xx,yy)
min_txt = [text,p]
else:
jl = cut_j(text)
for str_ in jl:
print(m1[0],m2[0])
p,xx,yy,ii,jj = lenc(m1[0],m2[0],str_)
if min_txt[1]>p:
min_txt[0] = str_
min_txt[1] = p
keyword_ = [xx,yy,ii,jj]
#print(keyword_)
if min_txt[1]>word_len:
print('未找到适合的句子')
else:
print('关键词: ',xx,yy,'出现在下面这段话,且距离最近:')
print(min_txt)
    if keyword_:
        # return the positions of the best (closest) sentence, not those of the last one scanned
        ii, jj = keyword_[2], keyword_[3]
    return min_txt[0],ii,jj
import pandas as pd
#Example = namedtuple('Example', 'entity_1, entity_2, left, mention_1, middle, mention_2, right, ' )
def position__last_occering(entity_,text_):
#jieba.load_userdict("../user_data/userdict.txt") #加载自定义词典
jieba.load_userdict(os.path.join(filepath, './user_data/company.txt'))
jieba.load_userdict(os.path.join(filepath, './user_data/expert.txt'))
jieba.load_userdict(os.path.join(filepath, './user_data/leader.txt'))
jieba.load_userdict(os.path.join(filepath, './user_data/region.txt'))
jieba.load_userdict(os.path.join(filepath, './user_data/researcharea.txt'))
index = -1
while True:
end_index = index
index = str(text_).find(entity_,index + 1)
#if (len(text_) < (index + len(entity_) + 5)) & (end_index != -1):
#break
if index == -1:
break
print(end_index)
return end_index
def paragraph_sectioning_to7(entity_1,entity_2,text_):
p1 = str(text_).find(entity_1)
p2 = position__last_occering(entity_2,text_)
if (p1 < 0) or (p2 < 0):
print('出错:句中无实体名!',p1,p2,entity_1,entity_2,text_)
return "出错:句中无实体名!"
#print('entity_1 position: ',p1,'\n')
l1 = p1 + len(entity_1)
l = " ".join(jieba.cut(text_[:p1]))
#p2 = str(text_).find(entity_2)
m = " ".join(jieba.cut(text_[l1:p2]))
l2 = p2 + len(entity_2)
r = " ".join(jieba.cut(text_[l2:]))
tuple_7 = str(entity_1) + '\t' + str(entity_2) + '\t' + l.replace('\t',' ') + '\t' + str(entity_1) + '\t' + m.replace('\t',' ') + '\t' + str(entity_2) + '\t' + r.replace('\t',' ') + '\n'
#print('\n',tuple_7,'\n')
return tuple_7
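# The returned string is one tab-separated line with the 7 fields consumed by
# rel_ext.Corpus / the Example namedtuple:
#   entity_1 \t entity_2 \t left \t mention_1 \t middle \t mention_2 \t right \n
# where left / middle / right are the jieba-tokenized (space-joined) spans before,
# between and after the two entity mentions. Hypothetical sketch (tokenization may differ):
#   '甲公司\t乙公司\t\t甲公司\t今日 宣布 与\t乙公司\t合作\n'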
#Example = namedtuple('Example', 'entity_1, entity_2, text_ ' )
# =============================================================================
# def paragraph_sectioning(text_): #3to7
#
# fields = text_[:].split('\t')
# print('(fields): ',len(fields),fields)
# if len(fields) != 3:
# return '0','0','0'
# #print(type(fields))
# entity_1 = fields[0]
# entity_2 = fields[1]
# min_text,i,j = position_mintxt(fields[2],x = entity_1, y = entity_2)
# #print('===============:',len(min_text), len(fields[2]))
# return paragraph_sectioning_to7(entity_1,entity_2,min_text),min_text,fields[2]
#
# =============================================================================
def paragraph_sectioning(text_,e1=None,e2=None): #3to7
if e2 is None:
if e1 is not None:
return "参数格式错" ,'0','0'
if e1 is None:
if e2 is not None:
return "参数格式错",'0','0'
fields = text_[:].split('\t')
#print('(fields): ',len(fields),fields)
if len(fields) != 3:
return '0','0','0'
#print(type(fields))
entity_1 = fields[0]
entity_2 = fields[1]
min_text,i,j = position_mintxt(fields[2],x = entity_1, y = entity_2)
#print('========1111111=======:',entity_1, entity_2, fields[2])
return paragraph_sectioning_to7(entity_1,entity_2,min_text),min_text,fields[2]
min_text,i,j = position_mintxt(text_,x = str(e1), y = str(e2))
#print('========2222222=======:',e1,e2, text_)
return paragraph_sectioning_to7(e1,e2,min_text),min_text,text_
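# paragraph_sectioning accepts either a single tab-separated string
# 'entity_1\tentity_2\ttext' (with e1/e2 omitted) or a raw text plus explicit e1/e2,
# and on success returns (seven_field_line, closest_sentence, original_text).
# Minimal usage sketch (hypothetical values):
#   tuple_7, min_text, original = paragraph_sectioning('甲公司\t乙公司\t...正文...')
#   tuple_7, min_text, original = paragraph_sectioning('...正文...', e1='甲公司', e2='乙公司')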
Text_Minlen = 30
def preprocessing_xls_4train(src_filename):
data1 = pd.read_excel(src_filename,keep_default_na=False)
kb_ = {}
with open('./data/corpus.tsv','w', encoding='UTF-8') as f_corpus:
for indexs in data1.index:
line_ = list(data1.loc[indexs].values[:])
if len(line_[0])<2:
continue
if len(line_[2])<2:
continue
if len(line_[3]) < Text_Minlen :
continue
#min_txt,position_1,position_2 = position_mintxt(line_[3],x=line_[0],y=line_[2])
tuple_7 = paragraph_sectioning_to7(line_[0],line_[2],line_[3])
if len(tuple_7)<30:
continue
f_corpus.writelines(tuple_7)
if len(line_[1]) < 2:# 为空时 是负样本
continue
if str(line_[1]) not in kb_.keys():
kb_[str(line_[1])] = []
kb_triple_str = str(line_[1]) + '\t' + str(line_[0]) + '\t' + str(line_[2])
#kb_[str(line_[1])].append(str(line_[1]) + '\t' + str(line_[0]) + '\t' + str(line_[2]))
#print('--len(unrelated_pairs)-----------------------------',str(line_[1]),len(kb_[str(line_[1])]))
#f_kb.writelines(str(line_[1]) + '\t' + str(line_[0]) + '\t' + str(line_[2]) + '\n')
if kb_triple_str not in kb_[str(line_[1])] :
kb_[str(line_[1])].append(kb_triple_str)
with open('./data/kb.tsv','w', encoding='UTF-8') as f_kb:
for rel_ in kb_.keys():
if len( kb_[rel_]) < 2: #某一个关系rel存在的KBTriple(rel, sbj, obj)少于2个,单个三元组存在的examples不会太多,比如实际中超不过20个
continue
for truple_ in kb_[rel_]:
f_kb.writelines(str(truple_) + '\n')
return '1'
def clean_xls_4train(src_filename):
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('关系样本')
sheet.write(0, 0, "左实体1")
sheet.write(0, 1, "关系类型")
sheet.write(0, 2, "右实体")
sheet.write(0, 3, "语料")
i = 0
data1 = pd.read_excel(src_filename)
for indexs in data1.index:
line_ = list(data1.loc[indexs].values[:])
# if len(line_[1])<2: #为空时 是负样本
# continue
if len(line_[0])<2:
continue
if len(line_[2])<2:
continue
if len(line_[3]) < Text_Minlen :
continue
#min_txt,position_1,position_2 = position_mintxt(line_[3],x=line_[0],y=line_[2])
tuple_7 = paragraph_sectioning_to7(line_[0],line_[2],line_[3])
if len(tuple_7)<30:
continue
i = i+1
sheet.write(i, 0, line_[0])
sheet.write(i, 1, line_[1])
sheet.write(i, 2, line_[2])
sheet.write(i, 3, line_[3])
workbook.save(os.path.join(os.path.dirname(os.path.abspath(src_filename)),'cleaned_teain_corpus.xlsx'))
def preprocessing_xls_4pred(src_filename):
if not os.path.isfile(src_filename):
src_filename = os.path.join('../data',src_filename)
if not os.path.isfile(src_filename):
return 'xls文件不存在!'
data1 = pd.read_excel(src_filename)
dir_ = os.path.dirname(os.path.abspath(src_filename))
tsv_file = os.path.join(dir_,'test.tsv')
tsv_4section = os.path.join(dir_,'test_4section.tsv')
with open(tsv_file,'w', encoding='UTF-8') as f_corpus,open(tsv_4section,'w', encoding='UTF-8') as f2_corpus:
for indexs in data1.index:
line_ = list(data1.loc[indexs].values[:])
print('----------', len(line_))
if len(line_[0])<2:
continue
if len(line_[2])<2:
continue
if len(line_[3]) < Text_Minlen :
continue
#print('------&&&&--line_[3]--', line_[3])
min_text,i,j = position_mintxt(line_[3],x = line_[0], y = line_[2])
tuple_7 = paragraph_sectioning_to7(line_[0],line_[2],min_text)
if len(tuple_7)<30:
continue
tuple_4 = str(line_[0]) + '\t' + str(line_[2]) + '\t' + str(min_text).replace('\t',' ') + '\t' + str(line_[3]).replace('\t',' ') + '\n'
f2_corpus.writelines(tuple_4)
f_corpus.writelines(tuple_7)
return tsv_file #返回全路径
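# preprocessing_xls_4pred writes two files next to the source workbook and returns the
# full path of test.tsv:
#   test.tsv           7 columns (entity_1, entity_2, left, mention_1, middle, mention_2, right)
#                      -- fed to rel_ext.Corpus for prediction
#   test_4section.tsv  4 columns (entity_1, entity_2, closest_sentence, original_text)
#                      -- used by prob2excel_2 to attach the source text to the Excel report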
#print(dirpath)
if __name__=="__main__":
#preprocessing_xls_4pred('../user_data/pre.xls')
#clean_xls_4train('../user_data/所有关系0603.xls')
preprocessing_xls_4train('./user_data/t.xls') | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relation_extraction/preprocessing_xls.py | preprocessing_xls.py |
from collections import Counter
import os
from relation_extraction import rel_ext
import pandas as pd
def simple_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.middle)
for word in ex.middle.split(' '):
feature_counter[word] += 5
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.middle.split(' '):
feature_counter[word] += 1
return feature_counter
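# Each featurizer fills a collections.Counter keyed by token; the counters are later turned
# into sparse vectors by the DictVectorizer inside Dataset.featurize. Forward-direction middle
# tokens are weighted 5x here. Hypothetical counter for one (sbj, obj) pair (made-up tokens):
#   Counter({'调研': 5, '考察': 5, '公司': 1})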
def left_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.left)
for word in ex.left.split(' '):
feature_counter[word] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.left.split(' '):
feature_counter[word] += 1
return feature_counter
def right_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.left)
for word in ex.right.split(' '):
feature_counter[word] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.right.split(' '):
feature_counter[word] += 1
return feature_counter
def train_(rex_ext_data_home='./data'):
#rex_ext_data_home = os.path.join('..','data')
# rex_ext_data_home_corpus = r'../data/rel_ext_data/corpus.tsv.gz'
# rex_ext_data_home_kb = r'../data/rel_ext_data/kb.tsv.gz'
# corpus = rel_ext.Corpus(rex_ext_data_home_corpus)
# kb = rel_ext.KB(rex_ext_data_home_kb)
corpus = rel_ext.Corpus(os.path.join(rex_ext_data_home,'corpus.tsv'))
kb = rel_ext.KB(os.path.join(rex_ext_data_home, 'kb.tsv'))
dataset = rel_ext.Dataset(corpus, kb)
dataset.count_examples()
dataset.count_relation_combinations()
#print(dataset)
# splits = dataset.build_splits()
# kbts_by_rel, labels_by_rel = dataset.build_dataset()
# all_relations = set(kbts_by_rel.keys())
train_result = rel_ext.train_models(
#all_relations,
featurizers=[left_bag_of_words_featurizer,simple_bag_of_words_featurizer,right_bag_of_words_featurizer],
data=dataset
)
print(train_result)
# rel_ext.examine_model_weights(train_result)
if __name__ == '__main__':
train_() | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relation_extraction/rel_train.py | rel_train.py |
from relation_extraction import rel_ext
import os
import pandas as pd
import xlrd, xlwt
from sklearn.metrics import precision_recall_fscore_support
import collections
from collections import namedtuple
def simple_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
for word in ex.middle.split(' '):
feature_counter[word] += 5
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.middle.split(' '):
feature_counter[word] += 1
return feature_counter
def left_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.left)
for word in ex.left.split(' '):
feature_counter[word] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.left.split(' '):
feature_counter[word] += 1
return feature_counter
def right_bag_of_words_featurizer(kbt, corpus, feature_counter):
for ex in corpus.get_examples_for_entities(kbt.sbj, kbt.obj):
#print(ex.left)
for word in ex.right.split(' '):
feature_counter[word] += 1
for ex in corpus.get_examples_for_entities(kbt.obj, kbt.sbj):
for word in ex.right.split(' '):
feature_counter[word] += 1
return feature_counter
#d: defaultdict(<class 'dict'>, {('实体1','实体2'): {'关系1': 0.625, '关系2': 0.0, ...}, ('实体x','实体y'): {'关系1': 0.625, '关系2': 0.0, ...}})
def prob2excel(d,ismass = False,dir_ = '../data'):
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('概率')
sheet.write(0, 0, "实体1")
sheet.write(0, 1, "实体2")
sheet.write(0, 2, "关系类型")
sheet.write(0, 3, "概率")
i = 0
prob2text = ''
for pair_,value in d.items():
new_value = {}
prob2text = prob2text + str(pair_[0]) + ' \t' + str(pair_[1]) + ' : '
for rel_type in sorted(value,key=value.__getitem__,reverse=True):
i = i+1
sheet.write(i, 0, pair_[0])
sheet.write(i, 1, pair_[1])
sheet.write(i, 2, rel_type)
sheet.write(i, 3, value[rel_type])
new_value[rel_type] = value[rel_type]
if not ismass:
prob2text = prob2text + str(rel_type) + ' \t' + str(value[rel_type])
prob2text = prob2text + '<br> ' + '<br> '
d[pair_] = new_value
# =============================================================================
# for pair_,value in d.items():
# for rel_type, p in value.items():
# print('===============:',str(pair_[0]) , str(pair_[1]),rel_type,p)
#
# =============================================================================
if ismass :
if i>0:
workbook.save(os.path.join(dir_,'predicted_result.xlsx'))
return '预测结果保存到了 ' + dir_ + '\\predicted_result.xlsx'
else:
return 'do nothing'
return prob2text
def prob2excel_2(d,ismass = False,dir_ = '../data'):
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('概率')
sheet.write(0, 0, "实体1")
sheet.write(0, 1, "实体2")
sheet.write(0, 2, "概率")
sheet.write(0, 3, "语料")
sheet.write(0, 4, "原语料")
i = 0
prob2text = ''
if ismass :
with open(os.path.join(dir_, 'test_4section.tsv'),'r', encoding='UTF-8') as f:
test_4section_data = f.readlines()
prob_dict_sorted = collections.defaultdict(dict)
for pair_,value in d.items():
prob2text = prob2text + str(pair_[0]) + ' \t' + str(pair_[1]) + ' : '
i = i+1
sheet.write(i, 0, pair_[0])
sheet.write(i, 1, pair_[1])
rel_value_str = ''
for rel_type in sorted(value,key=value.__getitem__,reverse=True):
rel_value_str = rel_value_str + str(rel_type) +':'+ str(value[rel_type])+'; '
prob_dict_sorted[str(pair_[0]) + ',' + str(pair_[1])][rel_type] = value[rel_type]
if not ismass:
prob2text = prob2text + str(rel_type) + ' \t' + str(value[rel_type])
prob2text = prob2text + '<br> ' + '<br> '
sheet.write(i, 2, rel_value_str)
if ismass :
for line in test_4section_data:
fields = line[:-1].split('\t')
#print('========fields=======:',len(fields))
if (fields[0] == pair_[0]) and (fields[1] == pair_[1]) :
sheet.write(i, 3, fields[2])
sheet.write(i, 4, fields[3])
break
if ismass :
if i>0:
workbook.save(os.path.join(dir_,'predicted_result.xlsx'))
return dir_ + '\\predicted_result.xlsx',None,None
else:
return 'do nothing',None,None
return 'ok',prob2text,prob_dict_sorted
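# Return contract of prob2excel_2 (as unpacked by prediction_ and app.py):
#   batch mode (ismass=True):   (path_to_predicted_result.xlsx, None, None) or ('do nothing', None, None)
#   single mode (ismass=False): ('ok', html_like_text, prob_dict_sorted), where prob_dict_sorted is
#   {'实体1,实体2': {'关系A': 0.87, '关系B': 0.05, ...}} with relations ordered by descending probability.
# The probability numbers above are hypothetical placeholders.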
def get_high_prob_excel(predicted_result_file = './data/predicted_result.xlsx', prob_threshold = 0.2):
workbook = xlwt.Workbook(encoding='utf-8')
sheet = workbook.add_sheet('概率')
sheet.write(0, 0, "实体1")
sheet.write(0, 1, "实体2")
sheet.write(0, 2, "概率")
sheet.write(0, 3, "语料")
sheet.write(0, 4, "原语料")
i = 0
data1 = pd.read_excel(predicted_result_file)
for indexs in data1.index:
line_ = list(data1.loc[indexs].values[:])
fields = line_[2].split('; ')
high_prob = fields[0].split(':')
if float(high_prob[1]) < prob_threshold:
continue
i = i+1
sheet.write(i, 0, line_[0])
sheet.write(i, 1, line_[1])
sheet.write(i, 2, line_[2])
sheet.write(i, 3, line_[3])
sheet.write(i, 4, line_[4])
if i < 1:
return 'do nothing'
file_name = os.path.join(os.path.dirname(os.path.abspath(predicted_result_file)),'high_prob.xlsx')
workbook.save(file_name)
if not os.path.isfile(file_name):
return 'do nothing'
#print('precision', file_name)
return file_name
Example = namedtuple('Example',
'entity_1, entity_2, left, mention_1, middle, mention_2, right, '
)
def prediction_(rex_ext_data_home='./data',test_line = '',filename_ = ''):
#rex_ext_data_home = os.path.join('..','data')
if '.tsv' in filename_ :
if not os.path.isfile(filename_):
filename_ = os.path.join(rex_ext_data_home,filename_)
if not os.path.isfile(filename_):
#prob_dict = collections.defaultdict(dict)
return "失败:处理预测文件tsv出错!",None,None
corpus = rel_ext.Corpus(filename_)
abspath_ = os.path.dirname(os.path.abspath(filename_))
#print(dirpath)
is_mass = True
else:
is_mass = False
data_list = []
test_line = test_line[:].split('\t')
data_list.append(Example(*test_line))
#print(type(test_line),test_line)
corpus = rel_ext.Corpus(data_list)
kb = rel_ext.KB(os.path.join(rex_ext_data_home, 'kb.tsv'))
dataset = rel_ext.Dataset(corpus, kb)
#defaultdict(<class 'dict'>, {('实体1','实体2'): {'关系1': 0.625, '关系2': 0.0, ...}, ('实体x','实体y'): {'关系1': 0.625, '关系2': 0.0, ...}})
#rel_prob_dict = collections.defaultdict(dict)
# data = pd.read_csv('../data/dev.tsv')
# splits = dataset.build_splits()
rel_prob_dict = rel_ext.find_new_relation_instances_new(
featurizers=[left_bag_of_words_featurizer,simple_bag_of_words_featurizer,right_bag_of_words_featurizer],
test_split = dataset)
#if isinstance(rel_prob_dict,int):
if len(rel_prob_dict) < 1 :
return "失败:可能概率太低或已有该实体对及其关系",None,None
if is_mass :
return prob2excel_2(rel_prob_dict,ismass = is_mass,dir_ = abspath_)
return prob2excel_2(rel_prob_dict)
#import tensorflow as tf
#from transformers import BertTokenizer, TFAutoModelForSequenceClassification,TFPreTrainedModel
from relation_extraction.preprocessing_xls import paragraph_sectioning
if __name__ == '__main__':
# model = TFAutoModelForSequenceClassification.from_pretrained('D:/peking_code/code_python/relation_extraction/src/chinese_L-12_H-768_A-12/bert_config.json')
#model = TFBertForSequenceClassification.from_pretrained("chinese_L-12_H-768_A-12/bert_config.json")
# nlp_bert_lg = pipeline('feature-extraction',model=model,from_tf=True)
# print(len(nlp_bert_lg('Hugging Face is a French company based in New York.')))
#test_text7,min_text,original_text = paragraph_sectioning('郑新聪 国资国企改革发展 要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,牢牢把握国有企业改革的正确方向。李南轩摄学习宣传贯彻党的十九大精神是全党全国当前和今后一个时期的首要政治任务。如何学习贯彻好党的十九大精神,习近平总书记在十九届中央政治局第一次集体学习时,提出要在学懂弄通做实上下功夫,号召“全党来一个大学习”。日前,福建全省各个领域、各条战线、各行各业兴起习近平新时代中国特色社会主义思想“大学习”热潮。福建省副省长郑新聪前些时候深入三钢集团福建罗源闽光钢铁有限责任公司一线,开展习近平新时代中国特色社会主义思想宣讲。宣讲会前,郑新聪一行深入到罗源闽光公司炼钢厂,沿着参观通道边走边看边听汇报,详细了解罗源闽光公司在绿色发展、技术指标、科技创新、经济效益等方面情况。在随后的宣讲会上,郑新聪以“深入学习习近平新时代中国特色社会主义思想深化和推动国有企业改革发展”为党课主题,分别从习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述、新时代国资国企改革发展肩负新的历史使命、坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展三个方面作了深刻阐释。就下一步如何推进新时代国资国企改革发展,郑新聪要求,要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,深刻认识深化国有企业改革的重大意义,牢牢把握国有企业改革的正确方向。以新发展理念推动国企发展宣讲中,郑新聪与参会人员共同学习回顾了习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述:目前,我国经济已由高速增长阶段转向高质量发展阶段。推动高质量发展是保持经济持续健康发展的必然要求;推动高质量发展是适应我国社会主要矛盾变化的必然要求;推动高质量发展是遵循经济规律发展的必然要求。此外,实现高质量发展必须坚持和践行新发展理念。发展是解决我国一切问题的基础和关键,发展必须是科学发展,必须坚定不移贯彻创新、协调、绿色、开放、共享的发展理念。新发展理念是习近平新时代中国特色社会主义经济思想的主要内容,在推进我国经济高质量发展过程中,必须坚定不移贯彻。为推动我国经济高质量发展,我们要坚持适应把握引领经济发展新常态,要把推进供给侧结构性改革作为经济工作的主线,要建设现代化经济体系。针对以上论述,郑新聪强调,全体成员要把握领会习近平新时代中国特色社会主义思想精神,特别是关于深化和推动国有企业改革发展方面,以此推动国企高质量发展。新时代国资国企肩负新使命郑新聪指出,党的十九大提出“要完善各类国有资产管理体制,改革国有资本授权经营体制,加快国有经济布局优化、结构调整、战略性重组,促进国有资产保值增值,推动国有资本做强做优做大,有效防止国有资产流失;深化国有企业改革,发展混合所有制经济,培育具有全球竞争力的世界一流企业。”这“九句话、109字”为国资国企改革发展指明了前进的方向,是我们推进下一步工作的重要行动指南。郑新聪表示,首先要深刻认识深化国有企业改革的重大意义。国有企业是推进国家现代化、保障人民共同利益的重要力量,是党和国家事业发展的重要物质基础和政治基础。深化国有企业改革是坚持和发展中国特色社会主义的必然要求,深化国有企业改革是实现“两个一百年”奋斗目标的重大任务,深化国有企业改革是推动我国经济持续健康发展的客观要求。在明确国企深化改革的重要性后,郑新聪强调,下一步要牢牢把握国有企业改革的正确方向。首先,要坚持和完善基本经济制度。必须毫不动摇巩固和发展公有制经济,毫不动摇鼓励、支持、引导非公有制经济发展。坚持公有制主体地位,发挥国有经济主导作用,做强做优做大国有企业。其次,要坚持社会主义市场经济改革方向。遵循市场经济规律和企业发展规律,坚持政企分开、政资分开、所有权与经营权分离,坚持权利、义务、责任相统一,促使国有企业真正成为独立市场主体。再者,坚持以解放和发展生产力为标准。始终把握有利于国有资产保值增值、有利于提高国有经济竞争力、有利于放大国有资本功能的要求,着力破除束缚国有企业发展的体制机制障碍,发挥国有企业各类人才积极性、主动性、创造性。同时,坚持增强活力与强化监管相结合。增强活力是搞好国有企业的本质要求,强化监管是搞好国有企业的重要保障,必须处理好两者关系,切实做到有机统一。此外,要更加坚持党对国有企业的领导。坚持党对国有企业的领导是重大政治原则,必须一以贯之。2016年10月,习近平在全国国有企业党的建设工作会议上指出:中国特色现代国有企业制度,“特”就特在把党的领导融入公司治理各环节。党建写入章程真正融入国企中心工作,章程明确了党组织在公司法人治理结构中的法定地位,特别是党组织在决策、执行、监督各环节的权责和工作方式。值得一提的是,郑新聪充分肯定三钢集团公司党委探索出的党支部密切联系群众的“五小工作法”,通过为群众讲清小道理、解决小问题、办好小事情、选树小典型、开展小活动,实现党建工作与生产经营、职工生活有机融合。随后,郑新聪指出,省属企业要扎实做好新时期深化国有企业改革的重点任务。“省属企业要完善各类国有资产管理体制。建立健全各类国有资产监督法律法规体系。以管资本为主深化国有资产监管要加快国有经济布局优化、结构调整、战略性重组。”郑新聪指出,省属企业要围绕服务国家战略,推动国有经济向关系国家安全、国民经济命脉和国计民生的重要行业和关键领域、重点基础设施集中。加快处置低效无效资产,淘汰落后产能,剥离办社会职能,解决历史遗留问题,提高国有资本配置效率。日前,国务院国资委下发了《关于加强国有企业资产负债约束的指导意见》是落实党的十九大精神,推动国有企业降杠杆、防范化解国有企业债务风险的重要举措,促使高负债国有企业资产负债率尽快回归合理水平。郑新聪指出,近年来,福建省省属企业也呈现一批改革发展典型。三钢集团通过兼并重组整合区域资源,集团钢产量成功突破1100万吨,真正步入大型钢铁企业行列。特别是2014年重组三金钢铁有限公司,形成了现在的罗源闽光钢铁公司,通过优化机制,改善工艺,2016年扭亏为盈,2018年18月份盈利10.74亿元,资产负债率从90降至目前的40,让一个濒临倒闭的企业成为一个福州区域明星企业,成为钢铁行业兼并重组成功典范。星网锐捷旗下凯米网络科技有限公司积极探索商业模式创新,向KTV提供“管理、流量、内容、广告”四大核心价值,构建互联网聚会娱乐新生态,用户超7500万,成为行业独角兽。发展混合所有制经济亦是新时期深化国有企业改革的重点任务。积极推进主业处于充分竞争行业和领域的商业类国有企业混合所有制改革,有效探索重点领域混合所有制改革,在引导子公司层面改革的同时探索在集团公司层面推进混合所有制改革。大力推动国有企业改制上市。稳妥有序开展国有控股混合所有制企业员工持股。此外,形成有效制衡的公司法人治理结构和灵活高效的市场化经营机制,加强监管有效防止国有资产流失。以国有资产保值增值、防止流失为目标,加强对企业关键业务、改革重点领域、国有资本运营重要环节的监督。建立健全国有企业重大决策失误和失职、渎职责任追究倒查机制。加强审计监督、纪检监督、巡查监督,形成监督合力。郑新聪表示,培育具有全球竞争力的世界一流企业也是目前省属企业的重点任务之一。支持国有企业深入开展国际化经营,在“一带一路”建设中推动优势产业走出去。')
#test_text7 = paragraph_sectioning(str('郑新聪 国资国企改革发展 要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,牢牢把握国有企业改革的正确方向。李南轩摄学习宣传贯彻党的十九大精神是全党全国当前和今后一个时期的首要政治任务。如何学习贯彻好党的十九大精神,习近平总书记在十九届中央政治局第一次集体学习时,提出要在学懂弄通做实上下功夫,号召“全党来一个大学习”。日前,福建全省各个领域、各条战线、各行各业兴起习近平新时代中国特色社会主义思想“大学习”热潮。福建省副省长郑新聪前些时候深入三钢集团福建罗源闽光钢铁有限责任公司一线,开展习近平新时代中国特色社会主义思想宣讲。宣讲会前,郑新聪一行深入到罗源闽光公司炼钢厂,沿着参观通道边走边看边听汇报,详细了解罗源闽光公司在绿色发展、技术指标、科技创新、经济效益等方面情况。在随后的宣讲会上,郑新聪以“深入学习习近平新时代中国特色社会主义思想深化和推动国有企业改革发展”为党课主题,分别从习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述、新时代国资国企改革发展肩负新的历史使命、坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展三个方面作了深刻阐释。就下一步如何推进新时代国资国企改革发展,郑新聪要求,要坚持用习近平新时代中国特色社会主义思想指导福建国资国企改革发展,深刻认识深化国有企业改革的重大意义,牢牢把握国有企业改革的正确方向。以新发展理念推动国企发展宣讲中,郑新聪与参会人员共同学习回顾了习近平新时代中国特色社会主义思想关于“推动我国经济高质量发展”的论述:目前,我国经济已由高速增长阶段转向高质量发展阶段。推动高质量发展是保持经济持续健康发展的必然要求;推动高质量发展是适应我国社会主要矛盾变化的必然要求;推动高质量发展是遵循经济规律发展的必然要求。此外,实现高质量发展必须坚持和践行新发展理念。发展是解决我国一切问题的基础和关键,发展必须是科学发展,必须坚定不移贯彻创新、协调、绿色、开放、共享的发展理念。新发展理念是习近平新时代中国特色社会主义经济思想的主要内容,在推进我国经济高质量发展过程中,必须坚定不移贯彻。为推动我国经济高质量发展,我们要坚持适应把握引领经济发展新常态,要把推进供给侧结构性改革作为经济工作的主线,要建设现代化经济体系。针对以上论述,郑新聪强调,全体成员要把握领会习近平新时代中国特色社会主义思想精神,特别是关于深化和推动国有企业改革发展方面,以此推动国企高质量发展。新时代国资国企肩负新使命郑新聪指出,党的十九大提出“要完善各类国有资产管理体制,改革国有资本授权经营体制,加快国有经济布局优化、结构调整、战略性重组,促进国有资产保值增值,推动国有资本做强做优做大,有效防止国有资产流失;深化国有企业改革,发展混合所有制经济,培育具有全球竞争力的世界一流企业。”这“九句话、109字”为国资国企改革发展指明了前进的方向,是我们推进下一步工作的重要行动指南。郑新聪表示,首先要深刻认识深化国有企业改革的重大意义。'))
#prediction_(test_line = test_text7)
prediction_(filename_ = 'test.tsv')
#get_high_prob_excel(predicted_result_file = '../user_data/predicted_result0602.xlsx', prob_threshold = 0.8)
# =============================================================================
# predictions, assess_o = rel_ext.predict_new(
# featurizers=[left_bag_of_words_featurizer,simple_bag_of_words_featurizer],
# assess_dataset = dataset)
# df = pd.DataFrame(columns=['实体1','实体2','实体关系'])
# sbjs, objs, pre = [],[],[]
# for item in assess_o.items():
# for i in item[1]:
# sbjs.append(i.sbj)
# objs.append(i.obj)
# for i in predictions.items():
# for j in i[1]:
# if j == True:
# pre.append(i[0])
# else:
# pre.append('not ' + i[0])
# df['实体1'] = sbjs
# df['实体2'] = objs
# df['实体关系'] = pre
# df.to_excel('../data/result.xlsx',index=False)
# =============================================================================
# df = pd.read_excel('../data/result.xlsx')
# predictions = df['实体关系']
# true_labels = df['label']
# predictions=[True if i == '调研' else False for i in predictions]
# true_labels = [True if i == '调研' else False for i in true_labels]
# # rel_ext.evaluate_predictions(predictions, true_labels)
# stats = precision_recall_fscore_support(true_labels, predictions, labels=[True, False])
# print('precision', 'recall', 'f-score', 'support')
# statss = [round(stat[0], 3)for stat in stats]
# stats = [round(stat[1], 3) for stat in stats]
# print(statss)
# print(stats) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relation_extraction/rel_prediction.py | rel_prediction.py |
from collections import Counter, defaultdict, namedtuple
import gzip
import numpy as np
import os
import random
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_recall_fscore_support
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import joblib
import pickle
import pandas as pd
__author__ = "Bill MacCartney"
__version__ = "CS224u, Stanford, Spring 2019"
Example = namedtuple('Example',
'entity_1, entity_2, left, mention_1, middle, mention_2, right, '
)
class Corpus(object):
def __init__(self, src_filename_or_examples):
if isinstance(src_filename_or_examples, str):
self.examples = self.read_examples(src_filename_or_examples)
else:
self.examples = src_filename_or_examples
self.examples_by_entities = {}
self._index_examples_by_entities()
@staticmethod
    # Read corpus examples from file
# def read_examples(src_filename):
# examples = []
# with gzip.open(src_filename, mode='rt', encoding='utf8') as f:
# for line in f:
# fields = line[:-1].split('\t')
# examples.append(Example(*fields))
# return examples
def read_examples(src_filename):
examples = []
if '.gz' in src_filename:
with gzip.open(src_filename, mode='rt', encoding='utf8') as f:
for line in f:
fields = line[:-1].split('\t')
examples.append(Example(*fields))
else:
if '.xls' in src_filename:
data1 = pd.read_excel(src_filename)
fields = []
with open('../data/kb.tsv','w', encoding='UTF-8') as f:
for indexs in data1.index:
if len(data1.loc[indexs].values[3]) < 30 :
continue
line_ = list(data1.loc[indexs].values[:])
fields.append(line_[0])
fields.append(line_[2])
#fields.append(paragraph_ sectioning(line_[3]))
#examples.append(Example(*fields))
f.writelines(str(line_[1]) + '\t' + str(line_[0]) + '\t' + str(line_[2]) + '\n')
else:
with open(src_filename,'r', encoding='UTF-8') as f:
data = f.readlines()
for line in data:
#print(type(line))
fields = line[:-1].split('\t')
#print(type(fields))
fields = fields[:7] #202005 add
examples.append(Example(*fields))
return examples
def input_examples(self,data):
examples = []
for line in data:
fields = line[:-1].split('\t')
examples.append(Example(*fields))
print(Example(*fields))
self.examples = examples
return examples
def _index_examples_by_entities(self):
for ex in self.examples:
if ex.entity_1 not in self.examples_by_entities:
self.examples_by_entities[ex.entity_1] = {}
if ex.entity_2 not in self.examples_by_entities[ex.entity_1]:
self.examples_by_entities[ex.entity_1][ex.entity_2] = []
self.examples_by_entities[ex.entity_1][ex.entity_2].append(ex)
def get_examples_for_entities(self, e1, e2):
try:
return self.examples_by_entities[e1][e2]
except KeyError:
return []
    # Show the first example for a given entity pair
def show_examples_for_pair(self, e1, e2):
exs = self.get_examples_for_entities(e1, e2)
if exs:
print('The first of {0:,} examples for {1:} and {2:} is:'.format(
len(exs), e1, e2))
print(exs[0])
else:
print('No examples for {0:} and {1:}'.format(e1, e2))
def __str__(self):
return 'Corpus with {0:,} examples'.format(len(self.examples))
def __repr__(self):
return str(self)
def __len__(self):
return len(self.examples)
KBTriple = namedtuple('KBTriple', 'rel, sbj, obj')
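# Each line of kb.tsv holds one knowledge-base triple as 'rel<TAB>sbj<TAB>obj'.
# Illustrative example (values for illustration only):
#   KBTriple(rel='调研', sbj='郝鹏', obj='太钢精带公司')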
class KB(object):
def __init__(self, src_filename_or_triples):
if isinstance(src_filename_or_triples, str):
self.kb_triples = self.read_kb_triples(src_filename_or_triples)
else:
self.kb_triples = src_filename_or_triples
self.all_relations = []
self.all_entity_pairs = []
self.kb_triples_by_relation = {}
self.kb_triples_by_entities = {}
self._collect_all_entity_pairs()
self._index_kb_triples_by_relation()
self._index_kb_triples_by_entities()
@staticmethod
    # Read the KB file and collect all (rel, sbj, obj) triples
def read_kb_triples(src_filename):
kb_triples = []
if '.gz' in src_filename:
with gzip.open(src_filename, mode='rt', encoding='utf8') as f:
for line in f:
rel, sbj, obj = line[:-1].split('\t')
kb_triples.append(KBTriple(rel, sbj, obj))
else:
with open(src_filename,'r', encoding='UTF-8') as f:
data = f.readlines()
for line in data:
rel, sbj, obj = line[:-1].split('\t')
kb_triples.append(KBTriple(rel, sbj, obj))
return kb_triples
    # Collect all (sbj, obj) entity pairs present in the KB
def _collect_all_entity_pairs(self):
pairs = set()
for kbt in self.kb_triples:
pairs.add((kbt.sbj, kbt.obj))
self.all_entity_pairs = sorted(list(pairs))
    # Index triples by relation and collect all_relations
def _index_kb_triples_by_relation(self):
for kbt in self.kb_triples:
if kbt.rel not in self.kb_triples_by_relation:
self.kb_triples_by_relation[kbt.rel] = []
self.kb_triples_by_relation[kbt.rel].append(kbt)
self.all_relations = sorted(list(self.kb_triples_by_relation))
    # Index triples by their (sbj, obj) entity pair
def _index_kb_triples_by_entities(self):
for kbt in self.kb_triples:
if kbt.sbj not in self.kb_triples_by_entities:
self.kb_triples_by_entities[kbt.sbj] = {}
if kbt.obj not in self.kb_triples_by_entities[kbt.sbj]:
self.kb_triples_by_entities[kbt.sbj][kbt.obj] = []
self.kb_triples_by_entities[kbt.sbj][kbt.obj].append(kbt)
# print(self.kb_triples_by_entities[kbt.sbj][kbt.obj])
    # Get all triples for a given relation
def get_triples_for_relation(self, rel):
try:
return self.kb_triples_by_relation[rel]
except KeyError:
return []
def get_triples_for_entities(self, e1, e2):
try:
return self.kb_triples_by_entities[e1][e2]
except KeyError:
return []
def __str__(self):
return 'KB with {0:,} triples'.format(len(self.kb_triples))
def __repr__(self):
return str(self)
def __len__(self):
return len(self.kb_triples)
class Dataset(object):
def __init__(self, corpus, kb):
self.corpus = corpus
self.kb = kb
    # Collect corpus entity pairs that have no triple in the KB (candidate negatives)
def find_unrelated_pairs(self, to_tsv=None):
unrelated_pairs = set()
if to_tsv is None:
for ex in self.corpus.examples:
if self.kb.get_triples_for_entities(ex.entity_1, ex.entity_2):
continue
#if self.kb.get_triples_for_entities(ex.entity_2, ex.entity_1): #20200527 ommit
#continue
unrelated_pairs.add((ex.entity_1, ex.entity_2))
            # print the collected pairs once, after the scan (not on every example)
            print(unrelated_pairs)
#unrelated_pairs.add((ex.entity_2, ex.entity_1))#20200527 ommit
return unrelated_pairs
with open('../data/corpus_unrelated.tsv','w',encoding='utf-8') as f:
for ex in self.corpus.examples:
if self.kb.get_triples_for_entities(ex.entity_1, ex.entity_2):
continue
#if self.kb.get_triples_for_entities(ex.entity_2, ex.entity_1):#20200527 ommit
#continue
unrelated_pairs.add((ex.entity_1, ex.entity_2))
#unrelated_pairs.add((ex.entity_2, ex.entity_1))#20200527 ommit
f.write(ex.entity_1 + '\t' + ex.entity_2)
f.write('\n')
#print(unrelated_pairs)
return unrelated_pairs
    # Featurization: turn per-relation KB triples into sparse feature matrices using corpus mentions
def featurize(self, kbts_by_rel, featurizers, vectorizer=None):
# Create feature counters for all instances (kbts).
feat_counters_by_rel = defaultdict(list)
for rel, kbts in kbts_by_rel.items():
for kbt in kbts:
#print(kbt)
feature_counter = Counter()
for featurizer in featurizers:
feature_counter = featurizer(kbt, self.corpus, feature_counter)
feat_counters_by_rel[rel].append(feature_counter)
feat_matrices_by_rel = defaultdict(list)
# If we haven't been given a Vectorizer, create one and fit
# it to all the feature counters.
if vectorizer is None:
vectorizer = DictVectorizer(sparse=True)
def traverse_dicts():
for dict_list in feat_counters_by_rel.values():
for d in dict_list:
yield d
vectorizer.fit(traverse_dicts())
# Now use the Vectorizer to transform feature dictionaries
# into feature matrices.
for rel, feat_counters in feat_counters_by_rel.items():
#print(feat_counters)
#print('\n\r')
feat_matrices_by_rel[rel] = vectorizer.transform(feat_counters)
#print('\n feat_matrices_by_rel[rel]...................',type(feat_matrices_by_rel[rel]))
return feat_matrices_by_rel, vectorizer
    # Build per-relation training data: triples of the relation are positives; triples of other
    # relations and unrelated corpus pairs (sampled at sampling_rate) are negatives labelled False.
def build_dataset(self,
include_positive=True,
sampling_rate=1,
seed=1):
unrelated_pairs = self.find_unrelated_pairs()
random.seed(seed)
print('--len(unrelated_pairs)-----------------------------',len(unrelated_pairs))
unrelated_pairs = random.sample(
unrelated_pairs, int(sampling_rate * len(unrelated_pairs)))
kbts_by_rel = defaultdict(list)
labels_by_rel = defaultdict(list)
for index, rel in enumerate(self.kb.all_relations):
ii = 0
if include_positive:
for kbt in self.kb.get_triples_for_relation(rel):
kbts_by_rel[rel].append(kbt)
labels_by_rel[rel].append(True)
for index2, rel2 in enumerate(self.kb.all_relations): #将其他关系类型作为负样本 20200531 add
if index2 == index :
continue
for kbt_ in self.kb.get_triples_for_relation(rel2):
kbts_by_rel[rel].append(kbt_)
labels_by_rel[rel].append(False)
ii = ii + 1
for sbj, obj in unrelated_pairs:
kbts_by_rel[rel].append(KBTriple(rel, sbj, obj))
#print(KBTriple(rel, sbj, obj))
labels_by_rel[rel].append(False)
ii = ii + 1
#print('--index, rel----total--unrelated--',index, rel,len(self.kb.get_triples_for_relation(rel) ),ii)
return kbts_by_rel, labels_by_rel
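    # Shape of build_dataset()'s output (hypothetical illustration):
    #   kbts_by_rel   = {'调研': [KBTriple('调研', '郝鹏', '太钢精带公司'), ...], ...}
    #   labels_by_rel = {'调研': [True, False, False, ...], ...}
    # i.e. one one-vs-rest dataset per relation, aligned index-by-index with its label list.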
# ============================================================================================
def count_examples(self):
counter = Counter()
for rel in self.kb.all_relations:
for kbt in self.kb.get_triples_for_relation(rel):
# count examples in both forward and reverse directions
counter[rel] += len(self.corpus.get_examples_for_entities(kbt.sbj, kbt.obj))
counter[rel] += len(self.corpus.get_examples_for_entities(kbt.obj, kbt.sbj))
# report results
print('{:20s} {:>10s} {:>10s} {:>10s}'.format(
'', '', '', 'examples'))
print('{:20s} {:>10s} {:>10s} {:>10s}'.format(
'relation', 'examples', 'triples', '/triple'))
print('{:20s} {:>10s} {:>10s} {:>10s}'.format(
'--------', '--------', '-------', '-------'))
for rel in self.kb.all_relations:
nx = counter[rel]
nt = len(self.kb.get_triples_for_relation(rel))
print('{:20s} {:10d} {:10d} {:10.2f}'.format(
rel, nx, nt, 1.0 * nx / nt))
def count_relation_combinations(self):
counter = Counter()
for sbj, obj in self.kb.all_entity_pairs:
rels = tuple(sorted({kbt.rel for kbt in self.kb.get_triples_for_entities(sbj, obj)}))
if len(rels) > 1:
counter[rels] += 1
counts = sorted([(count, key) for key, count in counter.items()], reverse=True)
print('The most common relation combinations are:')
for count, key in counts:
print('{:10d} {}'.format(count, key))
def __str__(self):
return "{}; {}".format(self.corpus, self.kb)
def __repr__(self):
return str(self)
def print_statistics_header():
print('{:20s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'relation', 'precision', 'recall', 'f-score', 'support', 'size'))
print('{:20s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'-' * 18, '-' * 9, '-' * 9, '-' * 9, '-' * 9, '-' * 9))
def make_dirs(path):
dir_path = os.path.join(os.getcwd(),path)
if not os.path.isdir(dir_path): # 无文件夹时创建
os.makedirs(dir_path)
# def print_statistics_row(rel, result):
# print('{:20s} {:10.3f} {:10.3f} {:10.3f} {:10d} {:10d}'.format(rel, *result))
def print_statistics_row(rel, result):
print('{:20s} {:10.3f} {:10.3f} {:10.3f} {:.0f} {:10d}'.format(rel, *result))
# def print_statistics_footer(avg_result):
# print('{:20s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
# '-' * 18, '-' * 9, '-' * 9, '-' * 9, '-' * 9, '-' * 9))
# print('{:20s} {:10.3f} {:10.3f} {:10.3f} {:10d} {:10d}'.format('macro-average', *avg_result))
def print_statistics_footer(avg_result):
print('{:20s} {:>10s} {:>10s} {:>10s} {:>10s} {:>10s}'.format(
'-' * 18, '-' * 9, '-' * 9, '-' * 9, '-' * 9, '-' * 9))
print('{:20s} {:10.3f} {:10.3f} {:10.3f} {:.0f} {:10d}'.format('macro-average', *avg_result))
def macro_average_results(results):
avg_result = [np.average([r[i] for r in results.values()]) for i in range(3)]
avg_result.append(np.sum([r[3] for r in results.values()]))
avg_result.append(np.sum([r[4] for r in results.values()]))
return avg_result
def evaluate(splits, classifier, test_split='dev', verbose=True):
test_kbts_by_rel, true_labels_by_rel = splits[test_split].build_dataset()
results = {}
if verbose:
print_statistics_header()
for rel in splits['all'].kb.all_relations:
pred_labels = classifier(test_kbts_by_rel[rel])
stats = precision_recall_fscore_support(true_labels_by_rel[rel], pred_labels, beta=0.5)
stats = [stat[1] for stat in stats] # stats[1] is the stat for label True
stats.append(len(pred_labels)) # number of examples
results[rel] = stats
if verbose:
print_statistics_row(rel, results[rel])
avg_result = macro_average_results(results)
if verbose:
print_statistics_footer(avg_result)
return avg_result[2] # return f_0.5 score as summary statistic
def evaluate_new(classifier, all_relations,data,verbose=True):
test_kbts_by_rel, true_labels_by_rel = data.build_dataset()
results = {}
if verbose:
print_statistics_header()
for rel in all_relations:
pred_labels = classifier(test_kbts_by_rel[rel])
stats = precision_recall_fscore_support(true_labels_by_rel[rel], pred_labels, beta=0.5)
stats = [stat[1] for stat in stats] # stats[1] is the stat for label True
stats.append(len(pred_labels)) # number of examples
results[rel] = stats
if verbose:
print_statistics_row(rel, results[rel])
avg_result = macro_average_results(results)
if verbose:
print_statistics_footer(avg_result)
return avg_result[2] # return f_0.5 score as summary statistic
def train_models(
# splits,
#all_relations,
featurizers,
data,
# split_name='train',
model_factory=lambda: LogisticRegression(fit_intercept=True, solver='liblinear'),
verbose=True):
train_dataset = data
# print(train_dataset)
train_o, train_y = train_dataset.build_dataset()
all_relations = set(train_o.keys())
# print(train_o,train_y)
train_X, vectorizer = train_dataset.featurize(train_o, featurizers)
models = {}
make_dirs('./data/saved_model')
with open('./data/saved_model/data.pkl', 'wb') as save1:
tuple_objects = (featurizers, vectorizer, all_relations)
pickle.dump(tuple_objects, save1)
for rel in all_relations:
models[rel] = model_factory()
models[rel].fit(train_X[rel], train_y[rel])
#print('\n models[rel].fit...................',rel,train_X[rel].shape[0])
joblib.dump( models[rel], './data/saved_model/' + rel + '_model.pkl')
return {
'featurizers': featurizers,
'vectorizer': vectorizer,
'models': models,
'all_relations': all_relations}
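# train_models persists everything prediction needs under ./data/saved_model/:
#   data.pkl             -> (featurizers, fitted DictVectorizer, all_relations)
#   <relation>_model.pkl -> one LogisticRegression per relation
# Minimal usage sketch (featurizer functions are assumed to be supplied by the caller):
#   train_result = train_models(featurizers=[some_featurizer], data=Dataset(corpus, kb))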
def predict(splits, train_result, split_name='dev'):
assess_dataset = splits[split_name]
assess_o, assess_y = assess_dataset.build_dataset()
test_X, _ = assess_dataset.featurize(
assess_o,
featurizers=train_result['featurizers'],
vectorizer=train_result['vectorizer'])
# print(test_X)
predictions = {}
for rel in train_result['all_relations']:
predictions[rel] = train_result['models'][rel].predict(test_X[rel])
return predictions, assess_y
# ==================================================================================================================
def predict_new(assess_dataset,featurizers):
# assess_dataset = splits[split_name]
assess_o, assess_y = assess_dataset.build_dataset(
include_positive=False,
sampling_rate=1)
# print(assess_o)
fp = open('../data/saved_model/data.pkl', 'rb') #202005 add
featurizer, vectorizer, all_relations = pickle.load(fp)
test_X, _ = assess_dataset.featurize(
assess_o,
featurizers=featurizers,
vectorizer=vectorizer)
predictions = {}
for rel in all_relations:
if test_X[rel].shape[0] < 1:
continue
model = joblib.load('../data/saved_model/' + rel + '_model.pkl')
predictions[rel] = model.predict(test_X[rel])
print(rel,predictions[rel])
fp.close()
return predictions,assess_o
def evaluate_predictions(predictions, test_y, verbose=True):
results = {} # one result row for each relation
if verbose:
print_statistics_header()
for rel, preds in predictions.items():
print()
stats = precision_recall_fscore_support(test_y[rel], preds, beta=0.5)
stats = [stat[1] for stat in stats] # stats[1] is the stat for label True
stats.append(len(test_y[rel]))
results[rel] = stats
if verbose:
print_statistics_row(rel, results[rel])
avg_result = macro_average_results(results)
if verbose:
print_statistics_footer(avg_result)
return avg_result[2] # return f_0.5 score as summary statistic
def experiment(
splits,
featurizers,
train_split='train',
test_split='dev',
model_factory=lambda: LogisticRegression(fit_intercept=True, solver='liblinear'),
verbose=True):
train_result = train_models(
splits,
featurizers=featurizers,
split_name=train_split,
model_factory=model_factory,
verbose=verbose)
predictions, test_y = predict(
splits,
train_result,
split_name=test_split)
evaluate_predictions(
predictions,
test_y,
verbose)
return train_result
def examine_model_weights(train_result, k=3, verbose=True):
feature_names = train_result['vectorizer'].get_feature_names()
for rel, model in train_result['models'].items():
print('Highest and lowest feature weights for relation {}:\n'.format(rel))
try:
coefs = model.coef_.toarray()
except AttributeError:
coefs = model.coef_
sorted_weights = sorted([(wgt, idx) for idx, wgt in enumerate(coefs[0])], reverse=True)
for wgt, idx in sorted_weights[:k]:
print('{:10.3f} {}'.format(wgt, feature_names[idx]))
print('{:>10s} {}'.format('.....', '.....'))
for wgt, idx in sorted_weights[-k:]:
print('{:10.3f} {}'.format(wgt, feature_names[idx]))
print('\n')
def find_new_relation_instances(
dataset,
featurizers,
train_split='train',
test_split='dev',
model_factory=lambda: LogisticRegression(fit_intercept=True, solver='liblinear'),
k=10,
verbose=True):
splits = dataset.build_splits()
# train models
train_result = train_models(
splits,
split_name=train_split,
featurizers=featurizers,
model_factory=model_factory,
verbose=True)
test_split = splits[test_split]
neg_o, neg_y = test_split.build_dataset(
include_positive=False,
sampling_rate=1.0)
neg_X, _ = test_split.featurize(
neg_o,
featurizers=featurizers,
vectorizer=train_result['vectorizer'])
# Report highest confidence predictions:
for rel, model in train_result['models'].items():
print(train_result['models'].items())
print('Highest probability examples for relation {}:\n'.format(rel))
probs = model.predict_proba(neg_X[rel])
probs = [prob[1] for prob in probs] # probability for class True
sorted_probs = sorted([(p, idx) for idx, p in enumerate(probs)], reverse=True)
for p, idx in sorted_probs[:k]:
print('{:10.3f} {}'.format(p, neg_o[rel][idx]))
print()
def find_new_relation_instances_new(
# dataset,
featurizers,
# train_split='train',
# test_split='dev',
# file,
test_split,
# model_factory=lambda: LogisticRegression(fit_intercept=True, solver='liblinear'),
k=10,
# verbose=True
):
# train models
# train_result = train_models(
# splits,
# split_name=train_split,
# featurizers=featurizers,
# model_factory=model_factory,
# verbose=True)
# test_split = splits[test_split]
fp = open('./data/saved_model/data.pkl', 'rb') #202005 add
featurizers1, vectorizer, all_relations = pickle.load(fp)
neg_o, neg_y = test_split.build_dataset(
include_positive=False,
sampling_rate=1.0)
# print(len(neg_y))
neg_X, _ = test_split.featurize(
neg_o,
featurizers=featurizers,
vectorizer=vectorizer)
# Report highest confidence predictions:
fp.close()
import collections
#defaultdict(<class 'dict'>, {('实体1','实体2'): {'关系1': 0.625, '关系2': 0.0, ...}, ('实体x','实体y'): {'关系1': 0.625, '关系2': 0.0, ...}})
rel_prob_dict = collections.defaultdict(dict)
if len(neg_X) < 1 :
return rel_prob_dict
for rel in all_relations:
if neg_X[rel].shape[0] < 1: #202004 add
continue
model = joblib.load('./data/saved_model/' + rel + '_model.pkl')
#print('\n Highest probability examples for relation {}:'.format(rel)) #ommit 20200527
#print(neg_X[rel])
probs = model.predict_proba(neg_X[rel])
probs = [prob[1] for prob in probs] # probability for class True
sorted_probs = sorted([(p, idx) for idx, p in enumerate(probs)], reverse=True)
for p, idx in sorted_probs:
if p >0.01:
rel_prob_dict[(neg_o[rel][idx].sbj,neg_o[rel][idx].obj)][rel] = round(p,3) #add at 2020
#print ('{:10.3f} {}'.format(p, neg_o[rel][idx]))#ommit 20200527
return rel_prob_dict
def bake_off_experiment(train_result, rel_ext_data_home, verbose=True):
test_corpus_filename = os.path.join(rel_ext_data_home, "corpus-test.tsv.gz")
test_kb_filename = os.path.join(rel_ext_data_home, "kb-test.tsv.gz")
corpus = Corpus(test_corpus_filename)
kb = KB(test_kb_filename)
test_dataset = Dataset(corpus, kb)
test_o, test_y = test_dataset.build_dataset()
test_X, _ = test_dataset.featurize(
test_o,
featurizers=train_result['featurizers'],
vectorizer=train_result['vectorizer'])
predictions = {}
for rel in train_result['all_relations']:
predictions[rel] = train_result['models'][rel].predict(test_X[rel])
evaluate_predictions(
predictions,
test_y,
verbose=verbose) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/relation_extraction/rel_ext.py | rel_ext.py |
import os
import pickle
from catl.utilities import preprocess_train
from catl.model import ensemble
from openpyxl import Workbook
name = input('Please input the name of company: ')
current_path = os.getcwd()
if os.path.isdir('data/'+name+'/preprocess') == False:
os.makedirs(r'data/'+name+'/preprocess')
if os.path.isdir('results/'+name+'/train/model/') == False:
os.makedirs(r'results/'+name+'/train/model/')
if os.path.isdir('results/'+name+'/train/results/') == False:
os.makedirs(r'results/'+name+'/train/results/')
# print(os.getcwd())
preprocess = preprocess_train(name=name,path=r'data/'+name+'/'+name)
preprocess.read_excel()
Original_Data,Original_Data_Useless,Labels = preprocess.excel2sentences()
Vocabulary_Title = preprocess.get_vocabulary_title(title_weight=5,feature_ratio=0.1) # feature_ratio可调节,用来控制词表的长度,防止词表过长,运行时间太长或者内存溢出。
TFIDF_Title,IDF_Title = preprocess.get_tfidf_title(title_weight=5) # title_weight可调节,用于标题重复几次,增加标题的作用。
with open('data/'+name+'/preprocess/'+name+'_vocabulary_title.pkl','wb') as save1:
pickle.dump(Vocabulary_Title,save1)
with open('data/'+name+'/preprocess/'+name+'_idf_title.pkl','wb') as save2:
pickle.dump(IDF_Title,save2)
Model = ensemble(name=name,r=0.95,data=TFIDF_Title,labels=Labels,model_save_path='results/'+name+'/train/model/',results_save_path='results/'+name+'/train/results/') # r可调节,训练在召回率低于r时停止过滤进入下阶段过滤。
Threshold,Index_Retain_Predict_Title,Index_Delete_Title = Model.train_title()
Vocabulary_Content = preprocess.get_vocabulary_content(feature_ratio=0.2,index=Index_Retain_Predict_Title) # feature_ratio可调节,用来控制词表的长度,防止词表过长,运行时间太长或者内存溢出。
TFIDF_Content,IDF_Content = preprocess.get_tfidf_content(index=Index_Retain_Predict_Title)
with open('data/'+name+'/preprocess/'+name+'_vocabulary_content.pkl','wb') as save3:
pickle.dump(Vocabulary_Content,save3)
with open('data/'+name+'/preprocess/'+name+'_idf_content.pkl','wb') as save4:
pickle.dump(IDF_Content,save4)
threshold,Index_Retain_Predict_Content,Index_Delete_Content = Model.train_content(data=TFIDF_Content,r=0.9) # r可调节,训练最终在召回率低于r时终止。
with open('results/'+name+'/train/model/'+'title_threshold.pkl','wb') as save5:
pickle.dump(Threshold,save5)
with open('results/'+name+'/train/model/'+'content_threshold.pkl','wb') as save6:
pickle.dump(threshold,save6)
workbook = Workbook()
worksheet1 = workbook.active
worksheet1.title = 'finally'
worksheet1.cell(row=1,column=1).value = 'title'
worksheet1.cell(row=1,column=2).value = 'content'
worksheet1.cell(row=1,column=3).value = 'label'
for i in range(len(Index_Retain_Predict_Content)):
worksheet1.cell(row=i+2,column=1).value = Original_Data[Index_Retain_Predict_Content[i]][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=i+2,column=2).value = Original_Data[Index_Retain_Predict_Content[i]][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=i+2,column=3).value = Original_Data[Index_Retain_Predict_Content[i]][2].encode('gbk','ignore').decode('gbk','ignore')
worksheet2 = workbook.create_sheet('delete through key words')
worksheet2.cell(row=1,column=1).value = 'title'
worksheet2.cell(row=1,column=2).value = 'content'
worksheet2.cell(row=1,column=3).value = 'label'
for i in range(len(Original_Data_Useless)):
worksheet2.cell(row=i+2,column=1).value = Original_Data_Useless[i][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=i+2,column=2).value = Original_Data_Useless[i][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=i+2,column=3).value = Original_Data_Useless[i][2].encode('gbk','ignore').decode('gbk','ignore')
worksheet3 = workbook.create_sheet('delete through content')
worksheet3.cell(row=1,column=1).value = 'title'
worksheet3.cell(row=1,column=2).value = 'content'
worksheet3.cell(row=1,column=3).value = 'label'
for i in range(len(Index_Delete_Content)):
worksheet3.cell(row=i+2,column=1).value = Original_Data[Index_Delete_Content[i]][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet3.cell(row=i+2,column=2).value = Original_Data[Index_Delete_Content[i]][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet3.cell(row=i+2,column=3).value = Original_Data[Index_Delete_Content[i]][2].encode('gbk','ignore').decode('gbk','ignore')
for ite in range(len(Index_Delete_Title)):
worksheet = workbook.create_sheet('delete through title '+str(ite+1))
worksheet.cell(row=1,column=1).value = 'title'
worksheet.cell(row=1,column=2).value = 'content'
worksheet.cell(row=1,column=3).value = 'label'
for i in range(len(Index_Delete_Title[ite+1])):
worksheet.cell(row=i+2,column=1).value = Original_Data[Index_Delete_Title[ite+1][i]][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet.cell(row=i+2,column=2).value = Original_Data[Index_Delete_Title[ite+1][i]][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet.cell(row=i+2,column=3).value = Original_Data[Index_Delete_Title[ite+1][i]][2].encode('gbk','ignore').decode('gbk','ignore')
workbook.save('results/'+name+'/train/results/train_results.xlsx') | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/catl/train.py | train.py |
import os
import pickle
import xlrd
import re
import jieba
from openpyxl import Workbook
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import normalize
from sklearn import metrics
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn versions
def document2sentences(document,key_words):
symbols = frozenset(u",。!?\n:;“”|)\u3000")
sentences= []
tmp = []
for character in document:
if not symbols.__contains__(character):
tmp.append(character)
elif character in ",。!?\n:;“”|)":
tmp.append("。")
for i in range(len(key_words)):
if key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
tmp = []
elif character == "\u3000":
continue
for i in range(len(key_words)):
if key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
return ''.join(sentences)
def filtrate_words(words,chinese_stopwords):
find_chinese = re.compile(u"[\u4e00-\u9fa5]+")
symbols = "[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%]"
filtrated_words = []
for j in range(len(words)):
if re.findall(find_chinese,words[j]) == []:
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) == '':
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) in chinese_stopwords:
continue
else:
filtrated_words.append(re.sub(symbols, "",re.findall(find_chinese,words[j])[0]))
return ' '.join(filtrated_words)
name = input('Please input the name of company: ')
current_path = os.getcwd()
if os.path.isdir('results/'+name+'/predict/results/') == False:
os.makedirs(r'results/'+name+'/predict/results/')
path = 'data/'+name+'/'+name
model_load_path = 'results/'+name+'/train/model/'
chinese_stopwords = []
for line in open('data/stopwords.txt','rb'):
chinese_stopwords.append(line.decode('utf-8-sig').split()[0])
key_words = []
for line in open(path+'_original_key_words.txt','rb'):
key_words.append(line.decode('utf-8-sig').split()[0])
jieba.load_userdict(path+'_original_key_words.txt')
with open('data/'+name+'/preprocess/'+name+'_vocabulary_title.pkl','rb') as load1:
vocabulary_title = pickle.load(load1)
with open('data/'+name+'/preprocess/'+name+'_idf_title.pkl','rb') as load2:
idf_title = pickle.load(load2)
with open('results/'+name+'/train/model/'+'title_threshold.pkl','rb') as load3:
Threshold = pickle.load(load3)
with open('results/'+name+'/train/model/'+'content_threshold.pkl','rb') as load4:
threshold = pickle.load(load4)
with open('data/'+name+'/preprocess/'+name+'_vocabulary_content.pkl','rb') as load5:
vocabulary_content = pickle.load(load5)
with open('data/'+name+'/preprocess/'+name+'_idf_content.pkl','rb') as load6:
idf_content = pickle.load(load6)
workbook = Workbook()
worksheet1 = workbook.active
worksheet1.title = 'retain'
worksheet1.cell(row=1,column=1).value = 'title'
worksheet1.cell(row=1,column=2).value = 'content'
worksheet1.cell(row=1,column=3).value = 'label'
worksheet2 = workbook.create_sheet('delete')
worksheet2.cell(row=1,column=1).value = 'title'
worksheet2.cell(row=1,column=2).value = 'content'
worksheet2.cell(row=1,column=3).value = 'label'
count_retain = 2
count_delete = 2
excel = xlrd.open_workbook(path+'_test.xls')
table = excel.sheet_by_index(0)
num_rows = table.nrows-1
Labels = []
Predictions = []
for idx in range(1,num_rows):
original_data = table.row_values(idx)
label = int(original_data[2]=='保留')
Labels.append(label)
content = original_data[1]
content_sentences = document2sentences(content,key_words)
if content_sentences == '':
prediction = 0
Predictions.append(prediction)
worksheet2.cell(row=count_delete,column=1).value = original_data[0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=2).value = original_data[1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=3).value = original_data[2].encode('gbk','ignore').decode('gbk','ignore')
count_delete += 1
print(name+' | Predict | Index | '+str(idx)+' | Delete')
else:
title = original_data[0]
title_tokenized = jieba.lcut(title)
content_sentences_tokenized = jieba.lcut(content_sentences)
title_tokenized_filtered = filtrate_words(title_tokenized,chinese_stopwords)
content_sentences_tokenized_filtered = filtrate_words(content_sentences_tokenized,chinese_stopwords)
data_title = [5*(title_tokenized_filtered+' ')+content_sentences_tokenized_filtered]
tf_transformer_title = CountVectorizer(ngram_range=(1,3),vocabulary=vocabulary_title)
tf_title = tf_transformer_title.fit_transform(data_title)
tf_weight_title = tf_title.toarray().tolist()
tfidf_weight_title = normalize([[x*y for x,y in zip(tf_weight_title[0],idf_title)]], norm='l2').tolist()
for ite in range(1,len(Threshold)+1):
clf_title = joblib.load(model_load_path+name+'_iteration_'+str(ite)+'_train_title_classifier.m')
tmp = clf_title.predict_proba(tfidf_weight_title).tolist()
if tmp[0][1] < Threshold[ite]:
prediction = 0
Predictions.append(prediction)
worksheet2.cell(row=count_delete,column=1).value = original_data[0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=2).value = original_data[1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=3).value = original_data[2].encode('gbk','ignore').decode('gbk','ignore')
count_delete += 1
print(name+' | Predict | Index | '+str(idx)+' | Delete')
ite -= 1
break
else:
continue
if ite == len(Threshold):
data_content = [content_sentences_tokenized_filtered]
tf_transformer_content = CountVectorizer(ngram_range=(1,3),vocabulary=vocabulary_content)
tf_content = tf_transformer_content.fit_transform(data_content)
tf_weight_content = tf_content.toarray().tolist()
tfidf_weight_content = normalize([[x*y for x,y in zip(tf_weight_content[0],idf_content)]], norm='l2').tolist()
clf_content = joblib.load(model_load_path+name+'_train_content_classifier.m')
tmp = clf_content.predict_proba(tfidf_weight_content).tolist()
if tmp[0][1] < threshold:
prediction = 0
Predictions.append(prediction)
worksheet2.cell(row=count_delete,column=1).value = original_data[0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=2).value = original_data[1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=count_delete,column=3).value = original_data[2].encode('gbk','ignore').decode('gbk','ignore')
count_delete += 1
print(name+' | Predict | Index | '+str(idx)+' | Delete')
else:
prediction = 1
Predictions.append(prediction)
worksheet1.cell(row=count_retain,column=1).value = original_data[0].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=count_retain,column=2).value = original_data[1].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=count_retain,column=3).value = original_data[2].encode('gbk','ignore').decode('gbk','ignore')
count_retain += 1
print(name+' | Predict | Index | '+str(idx)+' | Retain')
workbook.save('results/'+name+'/predict/results/'+name+'_predict_results.xlsx')
print(name+' | Predict | Number of Data | '+str(len(Labels)))
num_positive = Labels.count(1)
num_negative = Labels.count(0)
print(name+' | Predict | Number of Positive | '+str(num_positive))
print(name+' | Predict | Number of Negative | '+str(num_negative)+'\n')
recall = metrics.recall_score(Labels,Predictions,pos_label=1)
precision = metrics.precision_score(Labels,Predictions,pos_label=1)
f1 = metrics.f1_score(Labels,Predictions,pos_label=1)
print(name+' | Predict | Positive Recall | ' + '%.4f'%recall)
print(name+' | Predict | Positive Precision | ' + '%.4f'%precision)
print(name+' | Predict | Positive F1 | ' + '%.4f'%f1+'\n') | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/catl/draft.py | draft.py |
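# Summary of the prediction cascade above (descriptive comment added for clarity):
# documents whose key-sentence extraction is empty are deleted outright; otherwise
# the title+content text (title repeated 5 times as a weight) is scored by each
# saved per-iteration title classifier in turn, and the first classifier whose
# positive probability falls below its stored threshold deletes the document.
# Documents that survive every title classifier are scored by the content
# classifier and retained only if its positive probability reaches `threshold`.
# The recall/precision/F1 reported above are computed with pos_label=1, i.e. for
# the retained class.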
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
elif stop is None:
return None
else: # assume it's a collection
return frozenset(stop)
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
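# Illustrative example (comment only): with ngram_range=(3, 3), the word "word"
# is padded to " word " and _char_wb_ngrams yields [" wo", "wor", "ord", "rd "],
# so character n-grams never span a whitespace boundary.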
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
# number of functions. However the cost of a lambda call is a few
# hundreds of nanoseconds which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if isinstance(vocabulary, set):
vocabulary = sorted(vocabulary)
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
- it is very low memory scalable to large datasets as there is no need to
store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
binary: boolean, default=False.
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype: type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
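# Minimal usage sketch for HashingVectorizer (comment only; no vocabulary is
# stored, so the mapping from column index back to token is not recoverable):
#
#     hv = HashingVectorizer(n_features=2 ** 18, norm='l2')
#     X = hv.transform(["some raw text", "more raw text"])   # hypothetical docs
#     # X is sparse with shape (2, 2 ** 18); hv needs no fit and can stream.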
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
Prune features that are non zero in more samples than high or less
documents than low, modifying the vocabulary, and restricting it to
at most the limit most frequent.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
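# Worked example of the smoothed idf above (illustrative, not part of the
# original source): with n_samples = 4 and a term present in df = 2 documents,
# smooth_idf adds 1 to both counts, giving idf = log((4 + 1) / (2 + 1)) + 1 ≈ 1.51;
# a term present in every document gets idf = log(1) + 1 = 1 and is therefore
# down-weighted but not zeroed out.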
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
Otherwise the input is expected to be a sequence of items that
can be of type string or bytes, which are analyzed directly.
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False) | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/catl/text.py | text.py |
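# Minimal usage sketch for this module (comment only; the two-step pipeline is
# equivalent to TfidfVectorizer):
#
#     docs = ["the cat sat", "the dog barked"]        # hypothetical documents
#     counts = CountVectorizer(ngram_range=(1, 2)).fit_transform(docs)
#     tfidf = TfidfTransformer(norm='l2').fit_transform(counts)
#     # ... or in one step:
#     tfidf2 = TfidfVectorizer(ngram_range=(1, 2), norm='l2').fit_transform(docs)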
import os
import pickle
import xlrd
import re
import jieba
from openpyxl import Workbook
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.preprocessing import normalize
from sklearn import metrics
from sklearn.externals import joblib
class preprocess_train(object):
def __init__(self,name,path):
self.name = name
self.path = path
self.chinese_stopwords = []
home_path = os.path.dirname(os.path.realpath(__file__))
stopwords_path = os.path.join(home_path,'data/stopwords.txt')
for line in open(stopwords_path,'rb'):
self.chinese_stopwords.append(line.decode('utf-8-sig').split()[0])
self.key_words = []
for line in open(self.path+'_original_key_words.txt','rb'):
self.key_words.append(line.decode('utf-8-sig').split()[0])
jieba.load_userdict(self.path+'_original_key_words.txt')
def read_excel(self):
excel = xlrd.open_workbook(self.path+'_train.xls')
table = excel.sheet_by_index(0)
num_rows = table.nrows-1
self.original_data = []
for idx in range(1,num_rows+1):
row = table.row_values(idx)
self.original_data.append(row)
self.data = list(map(list,zip(*self.original_data)))
self.labels = [int(self.data[2][i]=='保留') for i in range(num_rows)]
def document2sentences(self,document):
symbols = frozenset(u",。!?\n:;“”|)\u3000")
sentences= []
tmp = []
for character in document:
if not symbols.__contains__(character):
tmp.append(character)
elif character in ",。!?\n:;“”|)":
tmp.append("。")
for i in range(len(self.key_words)):
if self.key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
tmp = []
elif character == "\u3000":
continue
for i in range(len(self.key_words)):
if self.key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
return ''.join(sentences)
def filtrate_words(self,words):
find_chinese = re.compile(u"[\u4e00-\u9fa5]+")
symbols = "[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%]"
filtrated_words = []
for j in range(len(words)):
if re.findall(find_chinese,words[j]) == []:
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) == '':
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) in self.chinese_stopwords:
continue
else:
filtrated_words.append(re.sub(symbols, "",re.findall(find_chinese,words[j])[0]))
return ' '.join(filtrated_words)
def excel2sentences(self):
contents = self.data[1]
print(self.name+' | Train | Content | Document 2 Sentences ......')
contents_sentences = [self.document2sentences(document) for document in contents]
original_data_useless = [self.original_data[i] for i in range(len(contents_sentences)) if contents_sentences[i] == '']
self.original_data = [self.original_data[i] for i in range(len(contents_sentences)) if contents_sentences[i] != '']
self.labels = [self.labels[i] for i in range(len(contents_sentences)) if contents_sentences[i] != '']
titles = [self.data[0][i] for i in range(len(contents_sentences)) if contents_sentences[i] != '']
contents_sentences = [contents_sentences[i] for i in range(len(contents_sentences)) if contents_sentences[i] != '']
workbook = Workbook()
worksheet1 = workbook.active
worksheet1.title = 'use'
worksheet1.cell(row=1,column=1).value = 'title'
worksheet1.cell(row=1,column=2).value = 'content'
worksheet1.cell(row=1,column=3).value = 'label'
for i in range(len(self.original_data)):
print(self.name+' | Train | Sentences | Row | '+str(i))
worksheet1.cell(row=i+2,column=1).value = self.original_data[i][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=i+2,column=2).value = contents_sentences[i].encode('gbk','ignore').decode('gbk','ignore')
worksheet1.cell(row=i+2,column=3).value = self.original_data[i][2].encode('gbk','ignore').decode('gbk','ignore')
worksheet2 = workbook.create_sheet('useless')
worksheet2.cell(row=1,column=1).value = 'title'
worksheet2.cell(row=1,column=2).value = 'content'
worksheet2.cell(row=1,column=3).value = 'label'
for i in range(len(original_data_useless)):
worksheet2.cell(row=i+2,column=1).value = original_data_useless[i][0].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=i+2,column=2).value = original_data_useless[i][1].encode('gbk','ignore').decode('gbk','ignore')
worksheet2.cell(row=i+2,column=3).value = original_data_useless[i][2].encode('gbk','ignore').decode('gbk','ignore')
workbook.save(self.path+'_train_sentences.xlsx')
print(self.name+' | Train | Title | Tokenized ......')
titles_tokenized = [jieba.lcut(sentences) for sentences in titles]
print(self.name+' | Train | Content | Tokenized ......')
contents_sentences_tokenized = [jieba.lcut(sentences) for sentences in contents_sentences]
print(self.name+' | Train | Title | Filtered ......')
self.titles_tokenized_filtered = [self.filtrate_words(words) for words in titles_tokenized]
print(self.name+' | Train | Content | Filtered ......')
self.contents_sentences_tokenized_filtered = [self.filtrate_words(words) for words in contents_sentences_tokenized]
return self.original_data,original_data_useless,self.labels
def get_chi(self,data,labels):
num = len(data)
length = len(data[0])
data_p = [data[i] for i in range(num) if labels[i]==1]
data_n = [data[i] for i in range(num) if labels[i]==0]
num_p = len(data_p)
num_n = len(data_n)
data_p_t = list(map(list,zip(*data_p)))
data_n_t = list(map(list,zip(*data_n)))
chi_square = []
for i in range(length):
b = data_p_t[i].count(0)
d = data_n_t[i].count(0)
a = num_p-b
c = num_n-d
if num_p*num_n*(a+c)*(b+d) == 0:
chi_square.append(0)
else:
chi_square.append((num*pow(a*d-b*c,2))/(num_p*num_n*(a+c)*(b+d)))
return chi_square
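# The score above is the chi-square statistic of the 2x2 term/class contingency
# table (descriptive comment): for each feature,
#     a = positive docs containing it,  b = positive docs without it,
#     c = negative docs containing it,  d = negative docs without it,
# so num_p = a + b, num_n = c + d, and
#     chi2 = N * (a*d - b*c)^2 / ((a+b) * (c+d) * (a+c) * (b+d)).
# get_vocabulary_title / get_vocabulary_content rank features by this score.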
def get_vocabulary_title(self,title_weight,feature_ratio):
data = [title_weight*(self.titles_tokenized_filtered[i]+' ')+self.contents_sentences_tokenized_filtered[i] for i in range(len(self.labels))]
labels = self.labels
tf_transformer = CountVectorizer(ngram_range=(1,3))
tf = tf_transformer.fit_transform(data)
vocabulary_list = tf_transformer.get_feature_names()
print(self.name+' | Train | Title | Vocabulary | Original Length | ' + str(len(vocabulary_list)))
num_key_words = int(len(vocabulary_list)*feature_ratio)
print(self.name+' | Train | Title | Vocabulary | Length | ' + str(num_key_words))
tf_weights = tf.toarray().tolist()
chi_square = self.get_chi(tf_weights,labels)
print(self.name+' | Train | Title | Vocabulary | Complete by CHI ......')
original_vocabulary_chi_square = [(vocabulary_list[i],chi_square[i]) for i in range(len(vocabulary_list))]
sorted_original_vocabulary_chi_square = sorted(original_vocabulary_chi_square,key=lambda x:x[1],reverse=True)
vocabulary_list = [sorted_original_vocabulary_chi_square[i][0] for i in range(num_key_words)]
self.vocabulary_title = {}
k = 0
for word in vocabulary_list:
self.vocabulary_title[word] = k
k += 1
return self.vocabulary_title
def get_tfidf_title(self,title_weight):
data = [title_weight*(self.titles_tokenized_filtered[i]+' ')+self.contents_sentences_tokenized_filtered[i] for i in range(len(self.labels))]
tf_transformer = CountVectorizer(ngram_range=(1,3),vocabulary=self.vocabulary_title)
train_tf = tf_transformer.fit_transform(data)
print(self.name+' | Train | Title | TF | Completed ......')
tfidf_transformer = TfidfTransformer(norm='l2',use_idf=True,smooth_idf=True)
train_tfidf = tfidf_transformer.fit_transform(train_tf)
train_tfidf_weights = train_tfidf.toarray().tolist()
print(self.name+' | Train | Title | TFIDF | Completed ......')
idf = tfidf_transformer.idf_.tolist()
return train_tfidf_weights,idf
def get_vocabulary_content(self,feature_ratio,index):
data = [self.contents_sentences_tokenized_filtered[idx] for idx in index]
labels = [self.labels[idx] for idx in index]
tf_transformer = CountVectorizer(ngram_range=(1,3))
tf = tf_transformer.fit_transform(data)
vocabulary_list = tf_transformer.get_feature_names()
print(self.name+' | Train | Content | Vocabulary | Original Length | ' + str(len(vocabulary_list)))
num_key_words = int(len(vocabulary_list)*feature_ratio)
print(self.name+' | Train | Content | Vocabulary | Length | ' + str(num_key_words))
tf_weights = tf.toarray().tolist()
chi_square = self.get_chi(tf_weights,labels)
print(self.name+' | Train | Content | Vocabulary | Complete by CHI ......')
original_vocabulary_chi_square = [(vocabulary_list[i],chi_square[i]) for i in range(len(vocabulary_list))]
sorted_original_vocabulary_chi_square = sorted(original_vocabulary_chi_square,key=lambda x:x[1],reverse=True)
vocabulary_list = [sorted_original_vocabulary_chi_square[i][0] for i in range(num_key_words)]
self.vocabulary_content = {}
k = 0
for word in vocabulary_list:
self.vocabulary_content[word] = k
k += 1
return self.vocabulary_content
def get_tfidf_content(self,index):
data = [self.contents_sentences_tokenized_filtered[idx] for idx in index]
tf_transformer = CountVectorizer(ngram_range=(1,3),vocabulary=self.vocabulary_content)
train_tf = tf_transformer.fit_transform(data)
print(self.name+' | Train | Content | TF | Completed ......')
tfidf_transformer = TfidfTransformer(norm='l2',use_idf=True,smooth_idf=True)
train_tfidf = tfidf_transformer.fit_transform(train_tf)
train_tfidf_weights = train_tfidf.toarray().tolist()
print(self.name+' | Train | Content | TFIDF | Completed ......')
idf = tfidf_transformer.idf_.tolist()
return train_tfidf_weights,idf
class single_predict(object):
def __init__(self,name,title,content):
self.name = name
self.title = title
self.content = content
current_path = os.getcwd()
if os.path.isdir('results/'+self.name+'/predict/results/') == False:
os.makedirs(r'results/'+self.name+'/predict/results/')
self.path = 'data/'+self.name+'/'+self.name
self.model_load_path = 'results/'+self.name+'/train/model/'
self.chinese_stopwords = []
file_path = os.path.dirname(os.path.realpath(__file__))
for line in open(os.path.join(file_path, 'data/stopwords.txt'),'rb'):
self.chinese_stopwords.append(line.decode('utf-8-sig').split()[0])
self.key_words = []
for line in open(self.path+'_original_key_words.txt','rb'):
self.key_words.append(line.decode('utf-8-sig').split()[0])
jieba.load_userdict(self.path+'_original_key_words.txt')
with open('data/'+self.name+'/preprocess/'+self.name+'_vocabulary_title.pkl','rb') as load1:
self.vocabulary_title = pickle.load(load1)
with open('data/'+self.name+'/preprocess/'+self.name+'_idf_title.pkl','rb') as load2:
self.idf_title = pickle.load(load2)
with open('results/'+self.name+'/train/model/'+'title_threshold.pkl','rb') as load3:
self.Threshold = pickle.load(load3)
with open('results/'+self.name+'/train/model/'+'content_threshold.pkl','rb') as load4:
self.threshold = pickle.load(load4)
with open('data/'+self.name+'/preprocess/'+self.name+'_vocabulary_content.pkl','rb') as load5:
self.vocabulary_content = pickle.load(load5)
with open('data/'+self.name+'/preprocess/'+self.name+'_idf_content.pkl','rb') as load6:
self.idf_content = pickle.load(load6)
def document2sentences(self,document):
symbols = frozenset(u",。!?\n:;“”|)\u3000")
sentences= []
tmp = []
for character in document:
if not symbols.__contains__(character):
tmp.append(character)
elif character in ",。!?\n:;“”|)":
tmp.append("。")
for i in range(len(self.key_words)):
if self.key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
tmp = []
elif character == "\u3000":
continue
for i in range(len(self.key_words)):
if self.key_words[i] in ''.join(tmp):
sentences.append(''.join(tmp))
return ''.join(sentences)
def filtrate_words(self,words):
find_chinese = re.compile(u"[\u4e00-\u9fa5]+")
symbols = "[A-Za-z0-9\[\`\~\!\@\#\$\^\&\*\(\)\=\|\{\}\'\:\;\'\,\[\]\.\<\>\/\?\~\!\@\#\\\&\*\%]"
filtrated_words = []
for j in range(len(words)):
if re.findall(find_chinese,words[j]) == []:
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) == '':
continue
elif re.sub(symbols, "",re.findall(find_chinese,words[j])[0]) in self.chinese_stopwords:
continue
else:
filtrated_words.append(re.sub(symbols, "",re.findall(find_chinese,words[j])[0]))
return ' '.join(filtrated_words)
def predict(self):
content_sentences = self.document2sentences(self.content)
if content_sentences == '':
prediction = '删除'
else:
title_tokenized = jieba.lcut(self.title)
content_sentences_tokenized = jieba.lcut(content_sentences)
title_tokenized_filtered = self.filtrate_words(title_tokenized)
content_sentences_tokenized_filtered = self.filtrate_words(content_sentences_tokenized)
data_title = [5*(title_tokenized_filtered+' ')+content_sentences_tokenized_filtered]
tf_transformer_title = CountVectorizer(ngram_range=(1,3),vocabulary=self.vocabulary_title)
tf_title = tf_transformer_title.fit_transform(data_title)
tf_weight_title = tf_title.toarray().tolist()
tfidf_weight_title = normalize([[x*y for x,y in zip(tf_weight_title[0],self.idf_title)]], norm='l2').tolist()
for ite in range(1,len(self.Threshold)+1):
clf_title = joblib.load(self.model_load_path+self.name+'_iteration_'+str(ite)+'_train_title_classifier.m')
tmp = clf_title.predict_proba(tfidf_weight_title).tolist()
if tmp[0][1] < self.Threshold[ite]:
prediction = '删除'
ite -= 1
break
else:
continue
if ite == len(self.Threshold):
data_content = [content_sentences_tokenized_filtered]
tf_transformer_content = CountVectorizer(ngram_range=(1,3),vocabulary=self.vocabulary_content)
tf_content = tf_transformer_content.fit_transform(data_content)
tf_weight_content = tf_content.toarray().tolist()
tfidf_weight_content = normalize([[x*y for x,y in zip(tf_weight_content[0],self.idf_content)]], norm='l2').tolist()
clf_content = joblib.load(self.model_load_path+self.name+'_train_content_classifier.m')
tmp = clf_content.predict_proba(tfidf_weight_content).tolist()
if tmp[0][1] < self.threshold:
prediction = '删除'
else:
prediction = '保留'
return prediction | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/catl/utilities.py | utilities.py |
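if __name__ == '__main__':
    # Minimal usage sketch (hypothetical category name and text). It assumes the
    # vocabulary/idf pickles and the trained title/content classifiers for this
    # category already exist under data/<name>/preprocess/ and
    # results/<name>/train/model/, as produced by the training pipeline.
    demo = single_predict('demo', '示例标题', '示例正文……')
    print(demo.predict())  # prints '保留' or '删除'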
import numpy as np
import heapq
from sklearn import linear_model
from sklearn.externals import joblib
import matplotlib.pyplot as plt
from sklearn import metrics
class ensemble(object):
def __init__(self,name,r,data,labels,model_save_path,results_save_path):
self.Name = name
self.Data = data
self.Labels = labels
self.model_save_path = model_save_path
self.results_save_path = results_save_path
self.Num = len(labels)
self.Index = [i for i in range(self.Num)]
print(self.Name+' | Train | Title | Number of Data | '+str(self.Num))
self.Num_Positive = self.Labels.count(1)
self.Num_Negative = self.Labels.count(0)
print(self.Name+' | Train | Title | Number of Positive | '+str(self.Num_Positive))
print(self.Name+' | Train | Title | Number of Negative | '+str(self.Num_Negative))
print(self.Name+' | Train | Title | Data Loaded'+'\n')
self.Ite = 1
self.Index_Retain_Train = [i for i in range(self.Num)]
self.Index_Retain_Predict = [i for i in range(self.Num)]
self.Index_Delete = {}
self.Recall = []
self.Precision = []
self.F1 = []
self.Threshold = {}
self.recall = r
self.config = True
def classifier(self,data,labels):
clf = linear_model.SGDClassifier(loss='log',penalty='l1',alpha=1e-3,class_weight='balanced',learning_rate='optimal',eta0=0.0)
clf.fit(data,labels)
probabilities = []
probabilities_positive = []
probabilities_negative = []
tmp = clf.predict_proba(data)
for i in range(len(data)):
if labels[i] == 1:
probabilities.append(tmp[i][1])
probabilities_positive.append(tmp[i][1])
else:
probabilities.append(tmp[i][1])
probabilities_negative.append(tmp[i][1])
return clf,probabilities,probabilities_positive,probabilities_negative
def unit(self):
data_train = [self.Data[idx] for idx in self.Index_Retain_Train]
labels_train = [self.Labels[idx] for idx in self.Index_Retain_Train]
num_positive = labels_train.count(1)
num_negative = labels_train.count(0)
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Logistic Regression ... ...')
clf_lr,probabilities_train,probabilities_positive_train,probabilities_negative_train = self.classifier(data=data_train,labels=labels_train)
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Adjust Threshold ... ...')
threshold = heapq.nsmallest(max(1,int(0.01*self.Num_Positive)),probabilities_positive_train)[-1]  # max(1, ...) guards against an empty slice when there are fewer than 100 positive samples
Index_Retain_Train = []
for i in range(num_positive+num_negative):
if labels_train[i] == 1:
Index_Retain_Train.append(self.Index_Retain_Train[i])
elif probabilities_train[i] > threshold:
Index_Retain_Train.append(self.Index_Retain_Train[i])
self.Index_Retain_Train = Index_Retain_Train
data_predict = [self.Data[idx] for idx in self.Index_Retain_Predict]
tmp = clf_lr.predict_proba(data_predict).tolist()
probabilities_predict = list(map(list,zip(*tmp)))[1]
Predictions = [0 for i in range(self.Num)]
Index_Retain_Predict = []
self.Index_Delete[self.Ite] = []
for i in range(len(data_predict)):
if probabilities_predict[i] >= threshold:
Index_Retain_Predict.append(self.Index_Retain_Predict[i])
Predictions[self.Index_Retain_Predict[i]] = 1
else:
self.Index_Delete[self.Ite].append(self.Index_Retain_Predict[i])
self.Index_Retain_Predict = Index_Retain_Predict
recall = metrics.recall_score(self.Labels,Predictions,pos_label=1)
precision = metrics.precision_score(self.Labels,Predictions,pos_label=1)
f1 = metrics.f1_score(self.Labels,Predictions,pos_label=1)
if recall >= self.recall:
self.f1 = f1
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Positive Recall | ' + '%.4f'%recall)
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Positive Precision | ' + '%.4f'%precision)
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Positive F1 | ' + '%.4f'%f1+'\n')
self.Recall.append(recall)
self.Precision.append(precision)
self.F1.append(f1)
joblib.dump(clf_lr,self.model_save_path+self.Name+'_iteration_'+str(self.Ite)+'_train_title_classifier.m')
self.Threshold[self.Ite] = threshold
self.Ite += 1
else:
print(self.Name+' | Train | Title | iteration | '+str(self.Ite)+' | Positive Recall Less Than Given Recall'+'\n')
self.Index_Retain_Predict += self.Index_Delete[self.Ite]
del self.Index_Delete[self.Ite]
self.config = False
def train_title(self):
while self.config == True:
self.unit()
plt.figure(figsize=(8,8),dpi=100)
plt.xlim(0,self.Ite+1)
plt.scatter(range(1,self.Ite),self.Recall,s=100,marker='+',color='r')
plt.plot(range(1,self.Ite),self.Recall,linestyle='-',color='r',linewidth=1.5,label='recall')
plt.scatter(range(1,self.Ite),self.Precision,s=100,marker='+',color='g')
plt.plot(range(1,self.Ite),self.Precision,linestyle='-',color='g',linewidth=1.5,label='precision')
plt.scatter(range(1,self.Ite),self.F1,s=100,marker='+',color='b')
plt.plot(range(1,self.Ite),self.F1,linestyle='-',color='b',linewidth=1.5,label='f1')
plt.legend(loc='lower right',fontsize=10)
plt.savefig(self.results_save_path+self.Name+'_train_title_results.png')
return self.Threshold,self.Index_Retain_Predict,self.Index_Delete
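    # A minimal driver sketch (variable names here are hypothetical, not part of the package):
    #   model = ensemble('demo', 0.95, title_tfidf_vectors, labels, './models/', './results/')
    #   thresholds, kept_idx, dropped = model.train_title()
    #   t, kept_idx2, dropped2 = model.train_content(content_tfidf_vectors_for_kept_idx, 0.95)
    # where the *_tfidf_vectors are dense feature matrices aligned with `labels`.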
def train_content(self,data,r):
data_train = data
labels_train = [self.Labels[idx] for idx in self.Index_Retain_Predict]
print(self.Name+' | Train | Content | Number of Data | '+str(len(labels_train)))
num_positive = labels_train.count(1)
num_negative = labels_train.count(0)
print(self.Name+' | Train | Content | Number of Positive | '+str(num_positive))
print(self.Name+' | Train | Content | Number of Negative | '+str(num_negative)+'\n')
clf_xg = linear_model.SGDClassifier(loss='log',penalty='l1',alpha=1e-3,class_weight='balanced',learning_rate='optimal',eta0=0.0)
clf_xg.fit(data_train,labels_train)
joblib.dump(clf_xg,self.model_save_path+self.Name+'_train_content_classifier.m')
tmp = clf_xg.predict_proba(np.array(data_train)).tolist()
probabilities_predict = list(map(list,zip(*tmp)))[1]
Recall = []
Precision = []
F1 = []
Threshold = []
for t in [x/1000 for x in range(1001)]:
Predictions = [0 for i in range(self.Num)]
for i in range(len(data_train)):
if probabilities_predict[i] >= t:
Predictions[self.Index_Retain_Predict[i]] = 1
recall = metrics.recall_score(self.Labels,Predictions,pos_label=1)
precision = metrics.precision_score(self.Labels,Predictions,pos_label=1)
f1 = metrics.f1_score(self.Labels,Predictions,pos_label=1)
Recall.append(recall)
Precision.append(precision)
F1.append(f1)
Threshold.append(t)
if recall < r:
break
print(self.Name+' | Train | Content | Finally | Threshold | ' + '%.4f'%Threshold[-1]+'\n')
print(self.Name+' | Train | Content | Finally | Positive Recall | ' + '%.4f'%Recall[-1])
print(self.Name+' | Train | Content | Finally | Positive Precision | ' + '%.4f'%Precision[-1])
print(self.Name+' | Train | Content | Finally | Positive F1 | ' + '%.4f'%F1[-1]+'\n')
plt.figure(figsize=(8,8),dpi=100)
plt.plot(Threshold,Recall,linestyle='-',color='r',linewidth=1.5,label='recall')
plt.plot(Threshold,Precision,linestyle='-',color='g',linewidth=1.5,label='precision')
plt.plot(Threshold,F1,linestyle='-',color='b',linewidth=1.5,label='f1')
plt.legend(loc='lower center',fontsize=10)
plt.savefig(self.results_save_path+self.Name+'_train_content_results.png')
Index_Retain_Predict = []
Index_Delete = []
for i in range(len(data_train)):
if probabilities_predict[i] >= Threshold[-1]:
Index_Retain_Predict.append(self.Index_Retain_Predict[i])
else:
Index_Delete.append(self.Index_Retain_Predict[i])
return Threshold[-1],Index_Retain_Predict,Index_Delete | zzsnML | /zzsnML-1.0.1-py3-none-any.whl/catl/model.py | model.py |
from zzstocklib_pkg import zzlogger
from urllib import request,parse
import time,datetime
import json
import re
import pandas as pd
import numpy as np
logger = zzlogger.logger
def get_sinacodelist(stock_list):
"""根据股票代码转译为sina所需要的代码,香港hk,沪sh,深sz""" #https://www.cnblogs.com/xuliangxing/p/8492705.html
new_codelist = []
for code in stock_list:
        if len(code) == 5:  # Hong Kong Stock Exchange
            code = "hk" + code
        elif len(code) == 6:  # Shanghai / Shenzhen exchanges
if code.startswith('600') or code.startswith('601') or code.startswith('603') or code.startswith('688') or code.startswith('501') or code.startswith('516') or code.startswith('113'):
code = "sh" + code
elif code.startswith('000') or code.startswith('001') or code.startswith('002') or code.startswith('300') or code.startswith('128') or code.startswith('127'):
code = "sz" + code
else:
logger.error("Error: code " + code + " not found in stock market!")
continue
else:
logger.error("Error: code " + code + " not found in stock market!")
continue
new_codelist.append(code)
#print(new_codelist)
return new_codelist
def get_stocklistprice(stock_list):
"""获取当前股票价格"""
stocks = ','.join(get_sinacode(stock_list))
stock_price = pd.DataFrame(columns=('name','open_price','lastday_price','current_price','highest_price','lowest_price'))
try:
page = request.urlopen("http://hq.sinajs.cn/?list=" + stocks)
result = page.read().decode('gb2312')
except request.URLError as e:
logger.error(e)
else:
price_list = result.split('\n')
for stock in price_list:
if len(stock.strip()) <= 0:
continue
data = re.findall(r'"(.+?)"', stock)
code = re.findall(r'str_[hkszsh]{2}(.+?)=', stock)
#print(data)
data = data[0].split(',')
if "str_hk" in stock: #如果是港股,则将英文名去掉和沪深的格式看齐,且将当前价格位置位移到第3位
del data[0]
data.insert(3,data[5])
stock = data
df = pd.DataFrame([stock[0:6]], index=code, columns=('name','open_price','lastday_price','current_price','highest_price','lowest_price'))
stock_price = stock_price.append(df)
stock_price['open_price']=stock_price['open_price'].apply(float)
stock_price['lastday_price']=stock_price['lastday_price'].apply(float)
stock_price['current_price']=stock_price['current_price'].apply(float)
stock_price['highest_price']=stock_price['highest_price'].apply(float)
stock_price['lowest_price']=stock_price['lowest_price'].apply(float)
return stock_price
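# A minimal usage sketch (the codes below are arbitrary examples):
#   prices = get_stocklistprice(['600036', '00700'])
#   print(prices.loc['600036', 'current_price'])
# The returned DataFrame is indexed by the bare stock code, with float price columns.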
def get_sinacode(stock_code):
"""根据股票代码转译为sina所需要的代码,香港hk,沪sh,深sz""" #https://www.cnblogs.com/xuliangxing/p/8492705.html
code = stock_code
if len(code) == 5: # 香港交易所
code = "hk" + code
elif len(code) == 6: # 沪深交易所
if code.startswith('600') or code.startswith('601') or code.startswith('603') or code.startswith('688') or code.startswith('501') or code.startswith('516') or code.startswith('113'):
code = "sh" + code
elif code.startswith('000') or code.startswith('001') or code.startswith('002') or code.startswith('300') or code.startswith('128') or code.startswith('127'):
code = "sz" + code
else:
logger.error("Error: code " + code +" not found in stock market!")
else:
logger.error("Error: code " + code + " not found in stock market!")
return code
def get_lastday_stockprice(stock_code):
"""获取当前股票价格"""
try:
page = request.urlopen("http://hq.sinajs.cn/?list=" + get_sinacode(stock_code))
result = page.read().decode('gb2312')
#print(result)
except request.URLError as e:
logger.error(e)
else:
content_data = re.findall(r'"(.+?)"', result)
#print(content_data)
data = content_data[0].split(',')
if "str_hk" in result: #如果是港股,则将英文名去掉和沪深的格式看齐,且将当前价格位置位移到第3位
del data[0]
data.insert(3,data[5])
stock_lastday_price = data[2]
#df = pd.DataFrame([stock[0:6]], index=code, columns=('name','open_price','lastday_price','current_price','highest_price','lowest_price'))
return float(stock_lastday_price) | zzstocklib-pkg-pubbyzz | /zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/sinaFinanceUtility.py | sinaFinanceUtility.py |
import re
import os
import tempfile
class Properties:
def __init__(self, file_name):
self.file_name = file_name
self.properties = {}
if os.path.exists(file_name):
with open(file_name) as f:
for line in f:
tline = line.strip()
if tline.startswith('#'):
continue
else:
                        # split on the first '=' only, so values that contain '=' are kept intact
                        kv_list = tline.split('=', 1)
if not kv_list or len(kv_list) != 2:
continue
else:
value_list = kv_list[1].strip().split(',')
if not value_list:
continue
else:
if len(value_list) == 1:
self.properties[kv_list[0].strip()] = value_list[0].strip()
else:
temp = []
for v in value_list:
temp.append(v.strip())
self.properties[kv_list[0].strip()] = temp
else:
raise Exception("file %s not found" % file_name)
def get(self, key):
if key in self.properties:
return self.properties[key]
return ''
def get_list(self, key):
if key in self.properties:
temp = self.properties[key]
if isinstance(temp, list):
return temp
else:
return [temp]
return []
def get_num(self, key):
if key in self.properties:
return float(self.properties[key])
return 0
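# A minimal usage sketch (the key names are hypothetical):
#   props = Properties('config/global.properties')
#   host = props.get('smtp_host')            # single value -> str
#   users = props.get_list('admin_emails')   # comma-separated value -> list
#   port = props.get_num('smtp_port')        # numeric value -> float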
path = os.path.split(os.path.realpath(__file__))[0]
config_file_path = os.path.join(path, 'config/global.properties')  # path to the global properties config file
properties = Properties(config_file_path)
#print(properties.get('notification_queue_name')) | zzstocklib-pkg-pubbyzz | /zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/propertiesUtility.py | propertiesUtility.py |
import pandas as pd
import requests
from lxml import etree
import time
import json
from pandas.io.json import json_normalize
import os
import re
def get_KZZInfo_from_JSL():
now_time = time.time()
url = 'https://www.jisilu.cn/data/cbnew/cb_list/?___jsl=LST___t=' + str(now_time)
    # send the request and parse the JSON response
response = requests.get(url)
jsonObj = response.json()
df = pd.DataFrame.from_dict(json_normalize(jsonObj['rows']), orient='columns')
#print(df)
#bond_code, bond_price, bond_increase_rate,bond_stock_price,bond_stock_increase_rate
jslbond_df = df[['cell.bond_id','cell.price','cell.increase_rt','cell.sprice','cell.sincrease_rt']]
jslbond_df.columns = ['bond_code','bond_price','bond_increase_rate','bond_stock_price','bond_stock_increase_rate']
return jslbond_df
#Assume the kzz_df is a dataframe
def merge_KZZlist_withJSLprice(kzz_df):
#kzz_df['债券现价'] = 0.0
kzz_df['债券振幅'] = 0.0
#kzz_df['股票现价'] = 0.0
kzz_df['正股振幅'] = 0.0
current_bondprice_df = get_KZZInfo_from_JSL()
for index, row in current_bondprice_df.iterrows():
try:
temp_row = kzz_df.loc[kzz_df['债券代码'] == row['bond_code']]
temp_row['当前价'] = row['bond_price']
temp_row['债券振幅'] = row['bond_increase_rate']
temp_row['正股股价'] = row['bond_stock_price']
temp_row['正股振幅'] = row['bond_stock_increase_rate']
kzz_df.loc[kzz_df['债券代码'] == row['bond_code']] = temp_row
except KeyError:
print('code {} is not in KZZ list. It need to be updated.'.format(row['bond_code']))
return kzz_df
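# A minimal usage sketch (assumes an Excel sheet that already carries the Chinese columns used
# below, e.g. '债券代码', '历史最低价', '年化%5收益率'):
#   kzz_df = pd.read_excel('kzz_list.xlsx', dtype={'债券代码': str})
#   kzz_df = merge_KZZlist_withJSLprice(kzz_df)   # refresh prices from jisilu
#   alerts = check_KZZ_with_Rules(kzz_df)         # rule-based alert messages (defined below)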
def gen_KZZDetaillist_with_RPAData(rpa_data_file_path):
#path = "/Users/zhangzhi/temp/zz/" #文件夹目录
path = rpa_data_file_path
files= os.listdir(path) #得到文件夹下的所有文件名称
data = pd.DataFrame(columns=('债券代码','债券名称','正股代码','正股名称','正股股价','市净率','每股净资产','转股价','信用级别'
,'转股开始日','转股结束日','回售触发价','回售执行日','强赎触发价','赎回登记日','上市日','到期日'
,'发行规模','利率1','利率2','利率3','利率4','利率5','利率6','赎回利率','回售条款','赎回条款'))
for file in files:
if file.find("bak") == -1:
print(file)
df = pd.read_csv(path + file,header=None)
df2 = pd.read_csv(path + df[2][0] + 'bak.csv',header=None)
code = re.findall(r'年(.+?%)', df[2][27])
lastcode = re.findall(r'([0-9]+%)', df2[2][1])
if len(code) == 6:
row={'债券代码':df[2][0],'债券名称':df[4][0],'正股代码':df[2][4],'正股名称':df[4][4],'正股股价':df[2][10],'市净率':df[4][10],
'每股净资产':0,'转股价':df[4][11],'信用级别':df[2][23]
,'转股开始日':df[2][14],'转股结束日':df[4][14],'回售触发价':df[2][13],'回售执行日':df[2][18],
'强赎触发价':df[4][13],'赎回登记日':df[2][16],'上市日':df[2][24],'到期日':df[2][26]
,'发行规模':df[4][21],'利率1':code[0],'利率2':code[1],'利率3':code[2],'利率4':code[3],
'利率5':code[4],'利率6':code[5],'赎回利率':lastcode[0],'回售条款':df2[2][0],'赎回条款':df2[2][1]}
else:
row={'债券代码':df[2][0],'债券名称':df[4][0],'正股代码':df[2][4],'正股名称':df[4][4],'正股股价':df[2][10],'市净率':df[4][10],
'每股净资产':0,'转股价':df[4][11],'信用级别':df[2][23]
,'转股开始日':df[2][14],'转股结束日':df[4][14],'回售触发价':df[2][13],'回售执行日':df[2][18],
'强赎触发价':df[4][13],'赎回登记日':df[2][16],'上市日':df[2][24],'到期日':df[2][26]
,'发行规模':df[4][21],'利率1':code[0],'利率2':code[1],'利率3':code[2],'利率4':code[3],
'利率5':code[4],'赎回利率':lastcode[0],'回售条款':df2[2][0],'赎回条款':df2[2][1]}
data = data.append(row,ignore_index=True)
return data
def check_KZZ_with_Rules(kzz_df):
notification_dict = {}
for index, row in kzz_df.iterrows():
        # rule 1: the bond's current price has dropped below its historical low
if row['当前价'] <= row['历史最低价']:
key = '债券:{} code:{} 当前价格低于历史最低价{}'.format(row['债券名称'],row['债券代码'],row['历史最低价'])
if (key not in notification_dict.keys()):
notification_dict[key] = 'waitting'
        # rule 2: the bond is within ~5% of its historical low and the annualized yield is above 5%
if row['当前价'] <= row['历史最低价']*1.05 and row['当前价'] <= row['年化%5收益率']:
key = '债券:{} code:{} 当前价格历史最低价{}+5%及年化收益5%空间'.format(row['债券名称'],row['债券代码'],row['历史最低价'])
if (key not in notification_dict.keys()):
notification_dict[key] = 'waitting'
return notification_dict
# avoid shadowing the pandas alias `pd` with the result DataFrame
kzz_detail_df = gen_KZZDetaillist_with_RPAData("C:\\zz\\")
kzz_detail_df.to_excel("C:\\ss.xlsx")
import pandas as pd
import struct
import datetime
import os
# only handles TDX (Tongdaxin) Shanghai & Shenzhen daily-bar lday data
def stock_csv(code):
file_object_path = 'C:/workspace/stock/stockdata/lday/' + code +'.csv'
filepath ='C:/new_jyplug/vipdoc/sz/lday/sz' + code +'.day'
    if not os.path.exists(filepath):
        filepath = 'C:/new_jyplug/vipdoc/sh/lday/sh' + code + '.day'
        if not os.path.exists(filepath):
            # neither the Shenzhen nor the Shanghai market has this file:
            # create an empty local csv and return
            file_object = open(file_object_path, 'w+')
            file_object.close()
            return
data = []
with open(filepath, 'rb') as f:
file_object = open(file_object_path, 'w+')
while True:
stock_date = f.read(4)
stock_open = f.read(4)
stock_high = f.read(4)
stock_low= f.read(4)
stock_close = f.read(4)
stock_amount = f.read(4)
stock_vol = f.read(4)
stock_reservation = f.read(4)
# date,open,high,low,close,amount,vol,reservation
if not stock_date:
break
            # each field is a 4-byte little-endian value; use explicit "<l"/"<f" so the
            # unpack also works on platforms where the native "l" is 8 bytes
            stock_date = struct.unpack("<l", stock_date)  # 4 bytes, e.g. 20091229
            stock_open = struct.unpack("<l", stock_open)  # open price * 1000
            stock_high = struct.unpack("<l", stock_high)  # high price * 1000
            stock_low = struct.unpack("<l", stock_low)  # low price * 1000
            stock_close = struct.unpack("<l", stock_close)  # close price * 1000
            stock_amount = struct.unpack("<f", stock_amount)  # turnover amount
            stock_vol = struct.unpack("<l", stock_vol)  # volume
            stock_reservation = struct.unpack("<l", stock_reservation)  # reserved field
            # %m is month; the original %M would have been parsed as minutes
            date_format = datetime.datetime.strptime(str(stock_date[0]), '%Y%m%d')
            list = date_format.strftime('%Y-%m-%d')+","+str(stock_open[0]/1000)+","+str(stock_high[0]/1000.0)+","+str(stock_low[0]/1000.0)+","+str(stock_close[0]/1000.0)+","+str(stock_amount[0])+","+str(stock_vol[0])+"\r\n"
file_object.writelines(list)
file_object.close()
def load_stock(code):
file_url = 'C:/workspace/stock/stockdata/lday/' + code +'.csv'
#if not os.path.exists(file_url):
# stock_csv(code)
    # always rebuild the csv so the latest data is loaded
stock_csv(code)
df = pd.read_csv(file_url, names=['date','open','high','low','close','amount','vol'])
return df
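# A minimal usage sketch (relies on the TDX install paths hard-coded above):
#   df = load_stock('600036')     # daily bars as a DataFrame
#   print(df['close'].iloc[-1])   # most recent close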
kzz_df = pd.read_excel('C:\\workspace\\stock\\stockdata\\dict\\KZZ.xlsx',dtype={'债券代码':str})
# for each row, access fields by column name
kzz_df['历史最低价'] = 0.0
kzz_df['历史最高价'] = 0.0
kzz_df['当前价'] = 0.0
kzz_df['剩余年限'] = 0.0
kzz_df['到期价值'] = 0.0
kzz_df['到期收益率'] = 0.0
kzz_df['到期年化收益率'] = 0.0
kzz_df['年化%5收益率'] = 0.0
for index, row in kzz_df.iterrows():
temp_df = load_stock(row['债券代码'])
if len(temp_df) > 0:
row['历史最低价'] = temp_df['close'].min()
row['历史最高价'] = temp_df['close'].max()
row['当前价'] = temp_df.iloc[-1]['close']
#print(row['债券代码'] + ' ' + str(row['历史最低价']))
kzz_df.iloc[index] = row
kzz_df.to_excel('C:\workspace\stock\stockdata\dict\kzz_updated.xlsx',index=False) | zzstocklib-pkg-pubbyzz | /zzstocklib_pkg_pubbyzz-0.0.2-py3-none-any.whl/zzstocklib_pkg/genKZZreport.py | genKZZreport.py |
import os
import smtplib
import time
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
class EmailConf:
EmailQQ = {"host": "smtp.qq.com", "port": 465}
Email163 = {"host": "smtp.163.com", "port": 465}
class SendEmail:
"""发送邮件"""
def __init__(self, host, user, password, port=465):
"""
初始化设置
:param host: smtp服务器地址(qq邮箱:smtp.qq.com,163邮箱:smtp.163.com")
:param port: smtp服务器端口:465
:param user: 邮箱账号
:param password: 邮箱的smtp服务授权码
"""
self.smtp = smtplib.SMTP_SSL(host=host, port=port)
self.smtp.login(user=user, password=password)
self.user = user
def send_email(self, subject="测试报告", content=None, filename=None, to_addrs=None):
"""
发送邮件
:param subject: 邮件主题
:param content: 邮件内容
:param filename: 报告文件的完整路径
:param to_addrs: 收件人地址
:type to_addrs: str or list
:return:
"""
print("--------准备发送测试报告---------")
msg = MIMEMultipart()
msg["Subject"] = subject
msg["From"] = self.user
if isinstance(to_addrs, str):
msg["To"] = to_addrs
elif to_addrs and isinstance(to_addrs, list):
msg["To"] = to_addrs[0]
if not content:
content = time.strftime("%Y-%m-%d-%H_%M_%S") + ":测试报告"
        # build the text part of the mail
        text = MIMEText(content, _subtype="html", _charset="utf8")
        msg.attach(text)
        # attach the report file if one was given
if filename and os.path.isfile(filename):
with open(filename, "rb") as f:
content = f.read()
report = MIMEApplication(content, _subtype=None)
name = os.path.split(filename)[1]
report.add_header('content-disposition', 'attachment', filename=name)
msg.attach(report)
        # send the mail
try:
self.smtp.send_message(msg, from_addr=self.user, to_addrs=to_addrs)
except Exception as e:
print("--------测试报告发送失败------")
raise e
else:
print("--------测试报告发送完毕------") | zzsukitest | /zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/send_email.py | send_email.py |
import re
import sys
import inspect
import warnings
from functools import wraps
from types import MethodType as MethodType
from collections import namedtuple
try:
from collections import OrderedDict as MaybeOrderedDict
except ImportError:
MaybeOrderedDict = dict
from unittest import TestCase
try:
from unittest import SkipTest
except ImportError:
class SkipTest(Exception):
pass
PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
if PY3:
class InstanceType():
pass
lzip = lambda *a: list(zip(*a))
text_type = str
string_types = str,
bytes_type = bytes
def make_method(func, instance, type):
if instance is None:
return func
return MethodType(func, instance)
CompatArgSpec = namedtuple("CompatArgSpec", "args varargs keywords defaults")
def getargspec(func):
if PY2:
return CompatArgSpec(*inspect.getargspec(func))
args = inspect.getfullargspec(func)
if args.kwonlyargs:
raise TypeError((
"parameterized does not (yet) support functions with keyword "
"only arguments, but %r has keyword only arguments. "
"Please open an issue with your usecase if this affects you: "
"https://github.com/wolever/parameterized/issues/new"
) % (func,))
return CompatArgSpec(*args[:4])
def skip_on_empty_helper(*a, **kw):
raise SkipTest("parameterized input is empty")
def reapply_patches_if_need(func):
def dummy_wrapper(orgfunc):
@wraps(orgfunc)
def dummy_func(*args, **kwargs):
return orgfunc(*args, **kwargs)
return dummy_func
if hasattr(func, 'patchings'):
func = dummy_wrapper(func)
tmp_patchings = func.patchings
delattr(func, 'patchings')
for patch_obj in tmp_patchings:
func = patch_obj.decorate_callable(func)
return func
def delete_patches_if_need(func):
if hasattr(func, 'patchings'):
func.patchings[:] = []
_param = namedtuple("param", "args kwargs")
class param(_param):
""" Represents a single parameter to a test case.
For example::
>>> p = param("foo", bar=16)
>>> p
param("foo", bar=16)
>>> p.args
('foo', )
>>> p.kwargs
{'bar': 16}
Intended to be used as an argument to ``@parameterized``::
@parameterized([
param("foo", bar=16),
])
def test_stuff(foo, bar=16):
pass
"""
def __new__(cls, *args, **kwargs):
return _param.__new__(cls, args, kwargs)
@classmethod
def explicit(cls, args=None, kwargs=None):
""" Creates a ``param`` by explicitly specifying ``args`` and
``kwargs``::
>>> param.explicit([1,2,3])
param(*(1, 2, 3))
>>> param.explicit(kwargs={"foo": 42})
param(*(), **{"foo": "42"})
"""
args = args or ()
kwargs = kwargs or {}
return cls(*args, **kwargs)
@classmethod
def from_decorator(cls, args):
""" Returns an instance of ``param()`` for ``@parameterized`` argument
``args``::
>>> param.from_decorator((42, ))
param(args=(42, ), kwargs={})
>>> param.from_decorator("foo")
param(args=("foo", ), kwargs={})
"""
if isinstance(args, param):
return args
elif isinstance(args, string_types):
args = (args,)
try:
return cls(*args)
except TypeError as e:
if "after * must be" not in str(e):
raise
raise TypeError(
"Parameters must be tuples, but %r is not (hint: use '(%r, )')"
% (args, args),
)
def __repr__(self):
return "param(*%r, **%r)" % self
class QuietOrderedDict(MaybeOrderedDict):
""" When OrderedDict is available, use it to make sure that the kwargs in
doc strings are consistently ordered. """
__str__ = dict.__str__
__repr__ = dict.__repr__
def parameterized_argument_value_pairs(func, p):
"""Return tuples of parameterized arguments and their values.
This is useful if you are writing your own doc_func
function and need to know the values for each parameter name::
>>> def func(a, foo=None, bar=42, **kwargs): pass
>>> p = param(1, foo=7, extra=99)
>>> parameterized_argument_value_pairs(func, p)
[("a", 1), ("foo", 7), ("bar", 42), ("**kwargs", {"extra": 99})]
If the function's first argument is named ``self`` then it will be
ignored::
>>> def func(self, a): pass
>>> p = param(1)
>>> parameterized_argument_value_pairs(func, p)
[("a", 1)]
Additionally, empty ``*args`` or ``**kwargs`` will be ignored::
>>> def func(foo, *args): pass
>>> p = param(1)
>>> parameterized_argument_value_pairs(func, p)
[("foo", 1)]
>>> p = param(1, 16)
>>> parameterized_argument_value_pairs(func, p)
[("foo", 1), ("*args", (16, ))]
"""
argspec = getargspec(func)
arg_offset = 1 if argspec.args[:1] == ["self"] else 0
named_args = argspec.args[arg_offset:]
result = lzip(named_args, p.args)
named_args = argspec.args[len(result) + arg_offset:]
varargs = p.args[len(result):]
result.extend([
(name, p.kwargs.get(name, default))
for (name, default)
in zip(named_args, argspec.defaults or [])
])
seen_arg_names = set([n for (n, _) in result])
keywords = QuietOrderedDict(sorted([
(name, p.kwargs[name])
for name in p.kwargs
if name not in seen_arg_names
]))
if varargs:
result.append(("*%s" % (argspec.varargs,), tuple(varargs)))
if keywords:
result.append(("**%s" % (argspec.keywords,), keywords))
return result
def short_repr(x, n=64):
""" A shortened repr of ``x`` which is guaranteed to be ``unicode``::
>>> short_repr("foo")
u"foo"
>>> short_repr("123456789", n=4)
u"12...89"
"""
x_repr = repr(x)
if isinstance(x_repr, bytes_type):
try:
x_repr = text_type(x_repr, "utf-8")
except UnicodeDecodeError:
x_repr = text_type(x_repr, "latin1")
if len(x_repr) > n:
x_repr = x_repr[:n // 2] + "..." + x_repr[len(x_repr) - n // 2:]
return x_repr
def default_doc_func(func, num, p):
if func.__doc__ is None:
return None
all_args_with_values = parameterized_argument_value_pairs(func, p)
# Assumes that the function passed is a bound method.
descs = ["%s=%s" % (n, short_repr(v)) for n, v in all_args_with_values]
# The documentation might be a multiline string, so split it
# and just work with the first string, ignoring the period
# at the end if there is one.
first, nl, rest = func.__doc__.lstrip().partition("\n")
suffix = ""
if first.endswith("."):
suffix = "."
first = first[:-1]
args = "%s[with %s]" % (len(first) and " " or "", ", ".join(descs))
return "".join([first.rstrip(), args, suffix, nl, rest])
def default_name_func(func, num, p):
base_name = func.__name__
name_suffix = "_%s" % (num,)
if len(p.args) > 0 and isinstance(p.args[0], string_types):
name_suffix += "_" + parameterized.to_safe_name(p.args[0])
return base_name + name_suffix
_test_runner_override = None
_test_runner_guess = False
_test_runners = set(["unittest", "unittest2", "nose", "nose2", "pytest"])
_test_runner_aliases = {
"_pytest": "pytest",
}
def set_test_runner(name):
global _test_runner_override
if name not in _test_runners:
raise TypeError(
"Invalid test runner: %r (must be one of: %s)"
% (name, ", ".join(_test_runners)),
)
_test_runner_override = name
def detect_runner():
""" Guess which test runner we're using by traversing the stack and looking
for the first matching module. This *should* be reasonably safe, as
        it's done during test discovery where the test runner should be the
stack frame immediately outside. """
if _test_runner_override is not None:
return _test_runner_override
global _test_runner_guess
if _test_runner_guess is False:
stack = inspect.stack()
for record in reversed(stack):
frame = record[0]
module = frame.f_globals.get("__name__").partition(".")[0]
if module in _test_runner_aliases:
module = _test_runner_aliases[module]
if module in _test_runners:
_test_runner_guess = module
break
if record[1].endswith("python2.6/unittest.py"):
_test_runner_guess = "unittest"
break
else:
_test_runner_guess = None
return _test_runner_guess
class parameterized(object):
""" Parameterize a test case::
class TestInt(object):
@parameterized([
("A", 10),
("F", 15),
param("10", 42, base=42)
])
def test_int(self, input, expected, base=16):
actual = int(input, base=base)
assert_equal(actual, expected)
@parameterized([
            (2, 3, 5),
(3, 5, 8),
])
def test_add(a, b, expected):
assert_equal(a + b, expected)
"""
def __init__(self, input, doc_func=None, skip_on_empty=False):
self.get_input = self.input_as_callable(input)
self.doc_func = doc_func or default_doc_func
self.skip_on_empty = skip_on_empty
def __call__(self, test_func):
self.assert_not_in_testcase_subclass()
@wraps(test_func)
def wrapper(test_self=None):
test_cls = test_self and type(test_self)
if test_self is not None:
if issubclass(test_cls, InstanceType):
raise TypeError((
"@parameterized can't be used with old-style classes, but "
"%r has an old-style class. Consider using a new-style "
"class, or '@parameterized.expand' "
"(see http://stackoverflow.com/q/54867/71522 for more "
"information on old-style classes)."
) % (test_self,))
original_doc = wrapper.__doc__
for num, args in enumerate(wrapper.parameterized_input):
p = param.from_decorator(args)
unbound_func, nose_tuple = self.param_as_nose_tuple(test_self, test_func, num, p)
try:
wrapper.__doc__ = nose_tuple[0].__doc__
# Nose uses `getattr(instance, test_func.__name__)` to get
# a method bound to the test instance (as opposed to a
# method bound to the instance of the class created when
# tests were being enumerated). Set a value here to make
# sure nose can get the correct test method.
if test_self is not None:
setattr(test_cls, test_func.__name__, unbound_func)
yield nose_tuple
finally:
if test_self is not None:
delattr(test_cls, test_func.__name__)
wrapper.__doc__ = original_doc
input = self.get_input()
if not input:
if not self.skip_on_empty:
raise ValueError(
"Parameters iterable is empty (hint: use "
"`parameterized([], skip_on_empty=True)` to skip "
"this test when the input is empty)"
)
wrapper = wraps(test_func)(skip_on_empty_helper)
wrapper.parameterized_input = input
wrapper.parameterized_func = test_func
test_func.__name__ = "_parameterized_original_%s" % (test_func.__name__,)
return wrapper
def param_as_nose_tuple(self, test_self, func, num, p):
nose_func = wraps(func)(lambda *args: func(*args[:-1], **args[-1]))
nose_func.__doc__ = self.doc_func(func, num, p)
# Track the unbound function because we need to setattr the unbound
# function onto the class for nose to work (see comments above), and
# Python 3 doesn't let us pull the function out of a bound method.
unbound_func = nose_func
if test_self is not None:
# Under nose on Py2 we need to return an unbound method to make
# sure that the `self` in the method is properly shared with the
# `self` used in `setUp` and `tearDown`. But only there. Everyone
# else needs a bound method.
func_self = (
None if PY2 and detect_runner() == "nose" else
test_self
)
nose_func = make_method(nose_func, func_self, type(test_self))
return unbound_func, (nose_func,) + p.args + (p.kwargs or {},)
def assert_not_in_testcase_subclass(self):
parent_classes = self._terrible_magic_get_defining_classes()
if any(issubclass(cls, TestCase) for cls in parent_classes):
raise Exception("Warning: '@parameterized' tests won't work "
"inside subclasses of 'TestCase' - use "
"'@parameterized.expand' instead.")
def _terrible_magic_get_defining_classes(self):
""" Returns the set of parent classes of the class currently being defined.
Will likely only work if called from the ``parameterized`` decorator.
This function is entirely @brandon_rhodes's fault, as he suggested
the implementation: http://stackoverflow.com/a/8793684/71522
"""
stack = inspect.stack()
if len(stack) <= 4:
return []
frame = stack[4]
code_context = frame[4] and frame[4][0].strip()
if not (code_context and code_context.startswith("class ")):
return []
_, _, parents = code_context.partition("(")
parents, _, _ = parents.partition(")")
return eval("[" + parents + "]", frame[0].f_globals, frame[0].f_locals)
@classmethod
def input_as_callable(cls, input):
if callable(input):
return lambda: cls.check_input_values(input())
input_values = cls.check_input_values(input)
return lambda: input_values
@classmethod
def check_input_values(cls, input_values):
        # Explicitly convert non-list inputs to a list so that:
# 1. A helpful exception will be raised if they aren't iterable, and
# 2. Generators are unwrapped exactly once (otherwise `nosetests
# --processes=n` has issues; see:
# https://github.com/wolever/nose-parameterized/pull/31)
if not isinstance(input_values, list):
input_values = list(input_values)
return [param.from_decorator(p) for p in input_values]
@classmethod
def expand(cls, input, name_func=None, doc_func=None, skip_on_empty=False,
**legacy):
""" A "brute force" method of parameterizing test cases. Creates new
test cases and injects them into the namespace that the wrapped
function is being defined in. Useful for parameterizing tests in
subclasses of 'UnitTest', where Nose test generators don't work.
"""
if "testcase_func_name" in legacy:
warnings.warn("testcase_func_name= is deprecated; use name_func=",
DeprecationWarning, stacklevel=2)
if not name_func:
name_func = legacy["testcase_func_name"]
if "testcase_func_doc" in legacy:
warnings.warn("testcase_func_doc= is deprecated; use doc_func=",
DeprecationWarning, stacklevel=2)
if not doc_func:
doc_func = legacy["testcase_func_doc"]
doc_func = doc_func or default_doc_func
name_func = name_func or default_name_func
def parameterized_expand_wrapper(f, instance=None):
stack = inspect.stack()
frame = stack[1]
frame_locals = frame[0].f_locals
parameters = cls.input_as_callable(input)()
if not parameters:
if not skip_on_empty:
raise ValueError(
"Parameters iterable is empty (hint: use "
"`parameterized.expand([], skip_on_empty=True)` to skip "
"this test when the input is empty)"
)
return wraps(f)(lambda: skip_on_empty_helper())
digits = len(str(len(parameters) - 1))
for num, p in enumerate(parameters):
name = name_func(f, "{num:0>{digits}}".format(digits=digits, num=num), p)
# If the original function has patches applied by 'mock.patch',
# re-construct all patches on the just former decoration layer
# of param_as_standalone_func so as not to share
# patch objects between new functions
nf = reapply_patches_if_need(f)
frame_locals[name] = cls.param_as_standalone_func(p, nf, name)
frame_locals[name].__doc__ = doc_func(f, num, p)
# Delete original patches to prevent new function from evaluating
# original patching object as well as re-constructed patches.
delete_patches_if_need(f)
f.__test__ = False
return parameterized_expand_wrapper
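    # A typical usage sketch for `expand` on a unittest.TestCase subclass:
    #   class TestAdd(TestCase):
    #       @parameterized.expand([("two_and_three", 2, 3, 5), ("three_and_five", 3, 5, 8)])
    #       def test_add(self, name, a, b, expected):
    #           self.assertEqual(a + b, expected)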
@classmethod
def param_as_standalone_func(cls, p, func, name):
@wraps(func)
def standalone_func(*a):
return func(*(a + p.args), **p.kwargs)
standalone_func.__name__ = name
# place_as is used by py.test to determine what source file should be
# used for this test.
standalone_func.place_as = func
# Remove __wrapped__ because py.test will try to look at __wrapped__
# to determine which parameters should be used with this test case,
# and obviously we don't need it to do any parameterization.
try:
del standalone_func.__wrapped__
except AttributeError:
pass
return standalone_func
@classmethod
def to_safe_name(cls, s):
return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
def parameterized_class(attrs, input_values=None, class_name_func=None, classname_func=None):
""" Parameterizes a test class by setting attributes on the class.
Can be used in two ways:
1) With a list of dictionaries containing attributes to override::
@parameterized_class([
{ "username": "foo" },
{ "username": "bar", "access_level": 2 },
])
class TestUserAccessLevel(TestCase):
...
2) With a tuple of attributes, then a list of tuples of values:
@parameterized_class(("username", "access_level"), [
("foo", 1),
("bar", 2)
])
class TestUserAccessLevel(TestCase):
...
"""
if isinstance(attrs, string_types):
attrs = [attrs]
input_dicts = (
attrs if input_values is None else
[dict(zip(attrs, vals)) for vals in input_values]
)
class_name_func = class_name_func or default_class_name_func
if classname_func:
warnings.warn(
"classname_func= is deprecated; use class_name_func= instead. "
"See: https://github.com/wolever/parameterized/pull/74#issuecomment-613577057",
DeprecationWarning,
stacklevel=2,
)
class_name_func = lambda cls, idx, input: classname_func(cls, idx, input_dicts)
def decorator(base_class):
test_class_module = sys.modules[base_class.__module__].__dict__
for idx, input_dict in enumerate(input_dicts):
test_class_dict = dict(base_class.__dict__)
test_class_dict.update(input_dict)
name = class_name_func(base_class, idx, input_dict)
test_class_module[name] = type(name, (base_class,), test_class_dict)
# We need to leave the base class in place (see issue #73), but if we
# leave the test_ methods in place, the test runner will try to pick
# them up and run them... which doesn't make sense, since no parameters
# will have been applied.
# Address this by iterating over the base class and remove all test
# methods.
for method_name in list(base_class.__dict__):
if method_name.startswith("test_"):
delattr(base_class, method_name)
return base_class
return decorator
def get_class_name_suffix(params_dict):
if "name" in params_dict:
return parameterized.to_safe_name(params_dict["name"])
params_vals = (
params_dict.values() if PY3 else
(v for (_, v) in sorted(params_dict.items()))
)
return parameterized.to_safe_name(next((
v for v in params_vals
if isinstance(v, string_types)
), ""))
def default_class_name_func(cls, num, params_dict):
suffix = get_class_name_suffix(params_dict)
return "%s_%s%s" % (
cls.__name__,
num,
suffix and "_" + suffix,
) | zzsukitest | /zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/parameterized.py | parameterized.py |
import hmac
import hashlib
import base64
import urllib.parse
import requests
import os
import smtplib
import time
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
class SendEmail:
"""Send mail"""
def __init__(self, host, user, password, port=465):
"""
:param host: smtp server address
:param port: smtp server report
:param user: Email account number
:param password: SMTP service authorization code of mailbox
"""
if port == 465 or port == 587:
self.smtp = smtplib.SMTP_SSL(host=host, port=port)
else:
self.smtp = smtplib.SMTP(host=host, port=port)
self.smtp.login(user=user, password=password)
self.user = user
def send_email(self, subject="test report", content=None, filename=None, to_addrs=None):
"""
:param subject:Email subject
:param content: Email content
:param filename: Attachment document
:param to_addrs: Addressee's address
:type to_addrs: str or list
:return:
"""
msg = MIMEMultipart()
msg["Subject"] = subject
msg["From"] = self.user
if isinstance(to_addrs, str):
msg["To"] = to_addrs
elif to_addrs and isinstance(to_addrs, list):
msg["To"] = to_addrs[0]
if not content:
content = time.strftime("%Y-%m-%d-%H_%M_%S") + ":测试报告"
text = MIMEText(content, _subtype="html", _charset="utf8")
msg.attach(text)
if filename and os.path.isfile(filename):
with open(filename, "rb") as f:
content = f.read()
try:
report = MIMEApplication(content, _subtype=None)
except Exception:
report = MIMEApplication(content)
name = os.path.split(filename)[1]
report.add_header('content-disposition', 'attachment', filename=name)
msg.attach(report)
try:
self.smtp.send_message(msg, from_addr=self.user, to_addrs=to_addrs)
except Exception as e:
print("Failed to send test report")
raise e
else:
print("The test report has been sent")
class DingTalk:
"""Nail group notification occurred"""
def __init__(self, url, data, secret=None):
"""
:param url: Dingtalk robot webhook address
:param data:Message sent (refer to the official message type)
:param secret: (not required) if the robot has set the signature security, it needs to pass in the signature key
"""
self.url = url
self.data = data
self.secret = secret
def get_stamp(self):
"""Countersign"""
timestamp = str(round(time.time() * 1000))
secret_enc = self.secret.encode('utf-8')
string_to_sign = '{}\n{}'.format(timestamp, self.secret)
string_to_sign_enc = string_to_sign.encode('utf-8')
hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()
sign = urllib.parse.quote_plus(base64.b64encode(hmac_code))
return {"sign": sign, "timestamp": timestamp}
def send_info(self):
"""send info"""
if self.secret:
params = self.get_stamp()
else:
params = None
response = requests.post(url=self.url, json=self.data, params=params)
return response
class WeiXin:
"""
Enterprise wechat group notice
"""
base_url = "https://qyapi.weixin.qq.com/cgi-bin/appchat/send?access_token="
def __init__(self, access_token=None, corp_id=None, corp_secret=None):
"""
:param corp_id: wechat corp_id
:param corp_secret:Applied credential key
"""
self.corp_id = corp_id
self.corp_secret = corp_secret
if access_token:
self.access_token = access_token
elif corp_id and corp_secret:
self.access_token = self.get_access_token()
else:
raise ValueError("access_token and [corpid, corpsecret] cannot both be empty. "
"At least one of them must be passed in")
def get_access_token(self):
"""get access_token"""
url = "https://qyapi.weixin.qq.com/cgi-bin/gettoken"
params = {
"corpid": self.corp_id,
"corpsecret": self.corp_secret
}
        result = requests.get(url=url, params=params).json()
        # `result` is already a dict; calling .json() on it again would raise an AttributeError
        if result['errcode'] != 0:
raise ValueError(result["errmsg"])
return result["access_token"]
def send_info(self, data):
"""send info"""
url = self.base_url + self.access_token
response = requests.post(url=url, data=data)
return response
if __name__ == '__main__':
# url = "https://oapi.dingtalk.com/robot/send?access_token=690900b5ce6d5d10bb1218b8e64a4e2b55f96a6d116aaf50"
# data = {
# "msgtype": "markdown",
# "markdown": {
# "title": "自动化测试报告",
# "text": open('python31.md', 'r', encoding='utf-8').read()
# },
# "at": {
# "atMobiles": [],
# "isAtAll": False
# }
# }
# ding = DingTalk(url=url, data=data)
# res = ding.send_info()
# print(res)
pass | zzsukitest | /zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/result_push.py | result_push.py |
from functools import wraps
import json
import yaml
def _create_test_name(index, name):
if index + 1 < 10:
test_name = name + "_00" + str(index + 1)
elif index + 1 < 100:
test_name = name + "_0" + str(index + 1)
else:
test_name = name + "_" + str(index + 1)
return test_name
def _update_func(new_func_name, params, test_desc, func, *args, **kwargs):
@wraps(func)
def wrapper(self):
return func(self, params, *args, **kwargs)
wrapper.__wrapped__ = func
wrapper.__name__ = new_func_name
wrapper.__doc__ = test_desc
return wrapper
def ddt(cls):
"""
    :param cls: the test class to process
:return:
"""
for name, func in list(cls.__dict__.items()):
if hasattr(func, "PARAMS"):
for index, case_data in enumerate(getattr(func, "PARAMS")):
new_test_name = _create_test_name(index, name)
if isinstance(case_data, dict) and case_data.get("title"):
test_desc = str(case_data.get("title"))
elif isinstance(case_data, dict) and case_data.get("desc"):
test_desc = str(case_data.get("desc"))
elif (not isinstance(case_data, str)) and hasattr(case_data, 'title'):
test_desc = str(case_data.title)
else:
test_desc = func.__doc__
func2 = _update_func(new_test_name, case_data, test_desc, func)
setattr(cls, new_test_name, func2)
else:
delattr(cls, name)
return cls
def list_data(datas):
"""
    :param datas: test data as a list (each element generates one test case)
:return:
"""
def wrapper(func):
setattr(func, "PARAMS", datas)
return func
return wrapper
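# A minimal usage sketch (the test class below is hypothetical):
#   @ddt
#   class TestDemo(unittest.TestCase):
#       @list_data([{"title": "case 1", "value": 1}, {"title": "case 2", "value": 2}])
#       def test_something(self, case):
#           self.assertIn("value", case)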
def yaml_data(file_path):
"""
    :param file_path: path to the YAML data file
:return:
"""
def wrapper(func):
try:
with open(file_path, "r", encoding="utf-8") as f:
datas = yaml.load(f, Loader=yaml.FullLoader)
except UnicodeDecodeError:
with open(file_path, "r", encoding="gbk") as f:
datas = yaml.load(f, Loader=yaml.FullLoader)
setattr(func, "PARAMS", datas)
return func
return wrapper
def json_data(file_path):
"""
    :param file_path: path to the JSON data file
:return:
"""
def wrapper(func):
try:
with open(file_path, "r", encoding="utf-8") as f:
datas = json.load(f)
except UnicodeDecodeError:
with open(file_path, "r", encoding="gbk") as f:
datas = json.load(f)
setattr(func, "PARAMS", datas)
return func
return wrapper | zzsukitest | /zzsukitest-1.0.6.tar.gz/zzsukitest-1.0.6/zzsuki_test/core/data_driver.py | data_driver.py |
import keyword
# lists all Python keywords (via the keyword module)
"""
num = 10
# print(num, id(num))
# num = 30
del num
print(num, id(num))
"""
"""result = input('请输入bool型的参数: ')
print('输入的参数: ', result, type(0))
if result:
print('你好,沐言科技')
"""
"""score = 10.0
if 90<=score<=100:
print("优等生")
elif 60<=score<90:
print("良等生")
else:
print("差等生")
"""
'''num_a = 0
num_b = 1
while num_b<=1000:
print(num_b, end='、')
num_a, num_b = num_b, num_a + num_b
'''
# converting between a tuple and a list
'''number = ('你好', '哈哈', '休息')
infos = [1, 2, 3, 4]
test = tuple(infos)
print('[数据类型]列表: %s ' % list(number))
print('[元组的数据类型]: %s' % type(test))
'''
'''
def get_info():
print('hello python')
return '你好呀'
data = get_info()
print(data)
'''
'''
def echo(title, url):
return '【带有参数的函数】,标题: {} ,地址: {}'.format(title, url)
print(echo(url='www.baidu.com', title='python'))
'''
'''
num = 100
def change_num():
global num
num = 30
change_num()
print('【全局变量】num=%s' % num)
'''
"""
def print_doc():
'''
测试__doc__全局变量的调用,无任何的方法体
:return:
'''
pass
print(print_doc.__doc__)
"""
"""
def print_data(count):
def out(data):
nonlocal count
count += 1
return "【第{}次输出数据】: {}".format(count, data)
return out
oa = print_data(0)
print(oa('哈哈哈哈'))
print()
print(eval('\n"-"*50\n'))
print()
import this
"""
"""
import sys
print('【执行平台信息】:%s'%(sys.platform))
print('【执行平台信息】:%s'%(sys.path))
"""
"""
import sys
print('【参数信息】:%s'%(sys.argv))
if len(sys.argv)==1:
print('没有输入参数,无法正确执行,程序退出!!!')
sys.exit(0)
else:
print('正确输入参数,程序结束', end="")
for item in sys.argv:
print(item, end='、')
"""
from random import *
numbers = [item for item in range(1, 10)]
print('【原始数据】:%s' % numbers)
print('-' * 50)
filter_result = list(filter(lambda item: item % 2 == 0, numbers))
print('【filter过滤数据】: %s' % filter_result)
print('-' * 50)
map_result = list(map(lambda item: item * 2, filter_result))
print('【map处理数据】: %s' % map_result)
print('-' * 50)
from functools import reduce
reduce_result = reduce(lambda x, y: x + y, map_result)
print('【reduce处理数据】: %s' % reduce_result) | zzt-message | /zzt_message-0.1-py3-none-any.whl/com/zzt/info/demo02.py | demo02.py |
import pandas as pd
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers,activations
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import ModelCheckpoint
# SE (Squeeze-and-Excitation) module
# to use it, attach it inside the down-sampling block
class Squeeze_excitation_layer(tf.keras.Model):
def __init__(self, filter_sq):
        # filter_sq is the number of units of the first Dense layer in the excitation step
super().__init__()
self.avepool = tf.keras.layers.GlobalAveragePooling1D()
self.dense = tf.keras.layers.Dense(filter_sq)
self.relu = tf.keras.layers.Activation('relu')
self.sigmoid = tf.keras.layers.Activation('sigmoid')
def call(self, inputs):
squeeze = self.avepool(inputs)
excitation = self.dense(squeeze)
excitation = self.relu(excitation)
excitation = tf.keras.layers.Dense(inputs.shape[-1])(excitation)
excitation = self.sigmoid(excitation)
excitation = tf.keras.layers.Reshape((1, inputs.shape[-1]))(excitation)
scale = inputs * excitation
return scale
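# A minimal usage sketch for the SE layer above (shapes are illustrative assumptions):
#   x = tf.random.normal((2, 1024, 64))   # (batch, length, channels)
#   se = Squeeze_excitation_layer(16)     # 16 = assumed bottleneck width
#   y = se(x)                             # same shape as x, channel-wise rescaled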
# down-sampling block
class DownSample(tf.keras.layers.Layer):
    # units: number of convolution filters
    # k_size: kernel_size of the first conv layer
    def __init__(self,units,is_pool=True,use_se = False,k_size=3):
        super(DownSample,self).__init__()
        # note: the original U-Net uses 'valid' padding; simplified to 'same' padding here
self.conv1 = tf.keras.layers.Conv1D(units,kernel_size=k_size,
padding = 'same')
self.conv2 = tf.keras.layers.Conv1D(units,kernel_size=3,
padding = 'same')
if is_pool:
self.pool = tf.keras.layers.MaxPooling1D(pool_size=2)
else:
self.pool=False
if use_se:
self.se = Squeeze_excitation_layer(units)
else:
self.se = False
def call(self,x):
if self.pool:
x = self.pool(x)
x = self.conv1(x)
x = tf.nn.relu(x)
x = self.conv2(x)
x = tf.nn.relu(x)
if self.se:
x = self.se(x)
return x
# up-sampling block
class UpSample(tf.keras.layers.Layer):
    # units: number of convolution filters
    def __init__(self,units):
        super(UpSample,self).__init__()
        # note: the original U-Net uses 'valid' padding; simplified to 'same' padding here
self.conv1 = tf.keras.layers.Conv1D(units,kernel_size=3,
padding = 'same')
self.conv2 = tf.keras.layers.Conv1D(units,kernel_size=3,
padding = 'same')
        # transposed convolution for up-sampling; the stride of 2 is what doubles the sequence length
self.deconv = tf.keras.layers.Conv1DTranspose(units//2,kernel_size=2,
padding = 'same',strides=2)
    # forward pass on input x
def call(self,x):
x = self.conv1(x)
x = tf.nn.relu(x)
x = self.conv2(x)
x = tf.nn.relu(x)
x = self.deconv(x)
x = tf.nn.relu(x)
return x
# the model below is effectively a 1-D SE-UNet
class SE_Unet_1d(tf.keras.Model):
def __init__(self):
super(SE_Unet_1d,self).__init__()
        # first block: 64 filters, no pooling, kernel_size 11 for its first conv layer
        self.down1 = DownSample(64,is_pool=False,k_size=11)
        # without use_se=True the blocks below reduce to the original U-Net
self.down2 = DownSample(128,use_se=True)
self.down3 = DownSample(256,use_se=True)
self.down4 = DownSample(512,use_se=True)
self.down5 = DownSample(1024)
        # a standalone transposed-conv up-sampling layer after the deepest block
self.up = tf.keras.layers.Conv1DTranspose(512,kernel_size=2,
strides=2,padding='same'
)
self.up1 = UpSample(512)
self.up2 = UpSample(256)
self.up3 = UpSample(128)
        # reuse DownSample as a plain double-conv block by setting is_pool=False
self.last_conv = DownSample(64,is_pool=False)
        # per-position regression head (1 filter; an n-class output would need n filters)
self.out_conv = tf.keras.layers.Conv1D(1,kernel_size=1,
padding = 'same')
def call(self,inputs):
x1 = self.down1(inputs)
x2 = self.down2(x1)
x3 = self.down3(x2)
x4 = self.down4(x3)
x5 = self.down5(x4)
x6 = self.up(x5)
        # concatenate the skip connection
x7 = tf.concat([x4,x6],2)
x8 = self.up1(x7)
x9 = tf.concat([x3,x8],2)
x10 = self.up2(x9)
x11 = tf.concat([x2,x10],2)
x12 = self.up3(x11)
x13 = tf.concat([x1,x12],2)
x14 = self.last_conv(x13)
out = self.out_conv(x14)
return out | zzx-deep-genome | /zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/tf_model.py | tf_model.py |
import time #内置模块
import pysam
import pyBigWig
import numpy as np
import pandas as pd
from pysam import FastaFile
from scipy.ndimage import gaussian_filter1d
# helper for one-hot encoding a DNA sequence
# encoding 1,000,000 sequences of length 1000 with this function takes about 700 s (GPU02 node)
def one_hot_dna(dna):
dna_dict={'A':[1.0,0.,0.,0.],'C':[0.,1.0,0.,0.],'G':[0.,0.,1.0,0.],'T':[0.,0.,0.,1.0],'N':[0.,0.,0.,0.],
'a':[1.0,0.,0.,0.],'c':[0.,1.0,0.,0.],'g':[0.,0.,1.0,0.],'t':[0.,0.,0.,1.0],'n':[0.,0.,0.,0.]}
return np.array([dna_dict[k] for k in dna])
# helper: recentre bed intervals so that every open-region sequence has a fixed length
def get_new_bed_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
    # up to this point the original bed structure is untouched
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
def get_new_bed4_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
    # up to this point the original bed structure is untouched
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list,'classes':bed_df.classes})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
# filter samples against a control file (tab-separated chromosome name and length)
def sample_control(bed_df,genome_control_df):
    # the built-in locals() returns a dict of the current local variables;
    # it is used here to create per-chromosome variables dynamically
ld = locals()
for i in range(len(genome_control_df)):
bed_chr_ = bed_df[bed_df.chr == genome_control_df.iloc[i,0]]
bed_chr = bed_chr_[bed_chr_.end<genome_control_df.iloc[i,1]]
ld['sample_' + str(genome_control_df.iloc[i,0])] =bed_chr[bed_chr.start>0]
bed_df_all = []
for j in genome_control_df.chr:
bed_df_all.append(ld['sample_' + j])
bed_df_control = pd.concat(bed_df_all,axis=0)
print(len(bed_df)-len(bed_df_control),'个样本被筛除,因为它们不在control文件中或超越了control文件中规定的界限')
return bed_df_control
# the second argument is a bed DataFrame rather than a raw bed file, i.e. this is a helper, not a standalone entry point
# one-hot encoding 100,000 sequences of length 1024 takes roughly 91 seconds
def get_one_hot_seq_list(fasta_path,sample_bed_df):
print('对',str(len(sample_bed_df)),'条序列进行one-hot编码')
start_time = time.time()
    fasta_file = pysam.FastaFile( fasta_path)  # open the fasta file; a .fai index must exist alongside it
seq=[]
for index,data in sample_bed_df.iterrows():
seq.append(one_hot_dna(fasta_file.fetch(data.chr,data.start,data.end) ))
end_time = time.time()
print('序列one-hot编码完成,该步骤累计耗时:',end_time-start_time,'秒')
return seq
def get_regression(bw_path,sample_bed_df):
print('对',str(len(sample_bed_df)),'个样本进行信息提取')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_regression = []
for index,data in sample_bed_df.iterrows():
        # bw.stats returns the mean signal over the interval
bw_regression.append(bw_file.stats(str(data.chr),int(data.start),int(data.end))[0])
bw_file.close()
end_time = time.time()
print('信息提取完成,该步骤累计耗时:',end_time-start_time,'秒')
return bw_regression
def get_base_regression(bw_path,sample_bed_df):
print('对',str(len(sample_bed_df)),'个样本进行碱基分辨率信息提取')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_base_regression = []
for index,data in sample_bed_df.iterrows():
        # bw.values returns the per-base values over the interval
bw_base_regression.append(bw_file.values(str(data.chr),int(data.start),int(data.end)))
bw_file.close()
end_time = time.time()
print('信息提取完成,该步骤累计耗时:',end_time-start_time,'秒')
return bw_base_regression
# reverse-complement a sequence, used for data augmentation
def DNA_complement(sequence):
    trantab = str.maketrans('ACGTacgt', 'TGCAtgca')  # translation table
    com_sequence = sequence[::-1].translate(trantab)  # reverse, then complement
return com_sequence
# fetch raw sequences (not one-hot encoded)
def get_seq_list(fasta_path,sample_bed_df):
    fasta_file = pysam.FastaFile( fasta_path)  # open the fasta file; a .fai index must exist alongside it
seq_=[]
for index,data in sample_bed_df.iterrows():
seq_.append(fasta_file.fetch(data.chr,data.start,data.end))
return seq_
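# A minimal usage sketch for genome_dataset (defined below); all paths are placeholders:
#   df = genome_dataset('peaks.bed', 'genome.fa', seq_len=1024,
#                       genome_size_control='genome.chrom.sizes',
#                       dataset_type='regression', bw_path='signal.bw')
#   # df columns: sample, seq_one_hot, target (or classes when dataset_type='classification')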
# for classification, the 4th bed column must hold the class label
def genome_dataset(bed_path,fasta_path,seq_len=1024,genome_size_control=None,dataset_type='regression',bw_path=None,Data_Augmentation=False):
    # note: chromosome naming (chr01 vs chr1, leading zeros, case) must match the fasta / bigWig files
    # TODO: tolerate bed files with or without a header line
    with open(bed_path,'r')as f:
        # first field of the first line (expected to be "chrom" or "chrXX")
        chek_bed = f.readline().strip().split('\t')[0]
if chek_bed[:3]!='chr' and chek_bed[:3]!='CHR' and chek_bed[:3]!='Chr':
raise IOError("bed文件格式不合规范,请检查!\n 注:各列需以tab间隔,无文件头")
else:
print('bed文件检查通过')
if dataset_type=='regression' or dataset_type=='base_regression':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:3]
bed_df.columns = ['chr','start','end']
        # build fixed-length intervals from the bed file
new_bed_df = get_new_bed_df(bed_df,seq_len)
elif dataset_type=='classification':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:4]
bed_df.columns = ['chr','start','end','classes']
new_bed_df = get_new_bed4_df(bed_df,seq_len)
else:
raise Exception("请选择正确的模式")
    # sample filtering against the genome-size control file
if genome_size_control!=None:
print('使用控制文件,将过滤起止位点不合文件要求的序列和未在文件中出现的染色体对应的序列')
genome_control = pd.read_csv(genome_size_control,sep='\t',names=['chr','control'])
control_bed_df = sample_control(new_bed_df,genome_control)
else:
control_bed_df = new_bed_df
print('您选择不使用控制文件')
    # one-hot encode the original sequences
seq = get_one_hot_seq_list(fasta_path,control_bed_df)
sample_name = []
    # build sample names for the original (non-augmented) samples
for k in range(len(control_bed_df)):
sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2]))
if dataset_type=='classification':
if Data_Augmentation:
print('通过取反向互补序列进行数据增强,经此步骤,您最终得到的样本量将翻倍')
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
            # the reverse-complement sequences are one-hot encoded here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
classes_all = list(control_bed_df.classes)+list(control_bed_df.classes)
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'classes':classes_all})
return dataset_df
else:
print('您选择不使用数据增强')
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'classes':list(control_bed_df.classes)})
return dataset_df
if dataset_type=='regression':
if Data_Augmentation:
print('通过取反向互补序列进行数据增强,经此步骤,您最终得到的样本量将翻倍')
            # map returns an iterator, so wrap it in list()
            # fetch the raw sequences first (passing the one-hot encoded `seq` here would be wrong)
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
            # the reverse-complement sequences are one-hot encoded here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
bw_regression = get_regression(bw_path,control_bed_df)
print('获取反向互补序列的信号')
bw_regression_all = bw_regression+bw_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_regression_all})
return dataset_df
else:
print('您选择不使用数据增强')
bw_regression = get_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_regression})
return dataset_df
if dataset_type=='base_regression':
if Data_Augmentation:
print('通过取反向互补序列进行数据增强,经此步骤,您最终得到的样本量将翻倍')
            # map returns an iterator, so wrap it in list()
            # fetch the raw sequences first (passing the one-hot encoded `seq` here would be wrong)
seq_ = get_seq_list(fasta_path,control_bed_df)
reverse_com_seq = list(map(DNA_complement , seq_))
            # the reverse-complement sequences are one-hot encoded here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
bw_base_regression = get_base_regression(bw_path,control_bed_df)
print('获取反向互补序列的信号')
bw_base_regression_all = bw_base_regression+bw_base_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_base_regression_all})
return dataset_df
else:
print('您选择不使用数据增强')
bw_base_regression = get_base_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_base_regression})
return dataset_df | zzx-deep-genome | /zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/get_dataset.py | get_dataset.py |
import os
import re
import numpy as np
import pandas as pd
import torch
import time #内置模块
import pysam
import pyBigWig
from pysam import FastaFile
from scipy.ndimage import gaussian_filter1d
# helper for one-hot encoding a DNA sequence
# encoding 1,000,000 sequences of length 1000 with this function takes about 700 s (GPU02 node)
def one_hot_dna(dna):
dna_dict={'A':[1.0,0.,0.,0.],'C':[0.,1.0,0.,0.],'G':[0.,0.,1.0,0.],'T':[0.,0.,0.,1.0],'N':[0.,0.,0.,0.],
'a':[1.0,0.,0.,0.],'c':[0.,1.0,0.,0.],'g':[0.,0.,1.0,0.],'t':[0.,0.,0.,1.0],'n':[0.,0.,0.,0.]}
return np.array([dna_dict[k] for k in dna])
# helper: recentre bed intervals so that every open-region sequence has a fixed length
def get_new_bed_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
#到此处为止,未破坏bed文件原始结构
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
def get_new_bed4_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
    #Up to this point the original bed file structure has not been modified
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list,'classes':bed_df.classes})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
#Filter samples according to the control file (chromosome name and length, tab separated)
def sample_control(bed_df,genome_control_df):
    #The Python built-in locals() returns a dict of all current local variables
    #It is used here to create local variables dynamically
ld = locals()
for i in range(len(genome_control_df)):
bed_chr_ = bed_df[bed_df.chr == genome_control_df.iloc[i,0]]
bed_chr = bed_chr_[bed_chr_.end<genome_control_df.iloc[i,1]]
ld['sample_' + str(genome_control_df.iloc[i,0])] =bed_chr[bed_chr.start>0]
bed_df_all = []
for j in genome_control_df.chr:
bed_df_all.append(ld['sample_' + j])
bed_df_control = pd.concat(bed_df_all,axis=0)
    print(len(bed_df)-len(bed_df_control),'samples were filtered out because their chromosome is not in the control file or they exceed the bounds defined there')
return bed_df_control
#The second argument takes a bed DataFrame rather than the raw bed file, i.e. this is a helper, not a standalone function
#One-hot encoding 100,000 sequences of length 1024 takes roughly 91 seconds
def get_one_hot_seq_list(fasta_path,sample_bed_df):
    print('One-hot encoding',str(len(sample_bed_df)),'sequences')
    start_time = time.time()
    fasta_file = pysam.FastaFile( fasta_path) #open the fasta file; a .fai index must exist next to it
seq=[]
for index,data in sample_bed_df.iterrows():
seq.append(one_hot_dna(fasta_file.fetch(data.chr,data.start,data.end) ))
end_time = time.time()
    print('One-hot encoding finished, this step took',end_time-start_time,'seconds')
    return seq
def get_regression(bw_path,sample_bed_df):
    print('Extracting signal for',str(len(sample_bed_df)),'samples')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_regression = []
for index,data in sample_bed_df.iterrows():
        #bw.stats returns the mean value over the interval
bw_regression.append(bw_file.stats(str(data.chr),int(data.start),int(data.end))[0])
bw_file.close()
end_time = time.time()
    print('Signal extraction finished, this step took',end_time-start_time,'seconds')
    return bw_regression
def get_base_regression(bw_path,sample_bed_df):
    print('Extracting base-resolution signal for',str(len(sample_bed_df)),'samples')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_base_regression = []
for index,data in sample_bed_df.iterrows():
        #bw.values returns the value at every base in the interval
bw_base_regression.append(bw_file.values(str(data.chr),int(data.start),int(data.end)))
bw_file.close()
end_time = time.time()
    print('Signal extraction finished, this step took',end_time-start_time,'seconds')
    return bw_base_regression
#Get the reverse-complement sequence, used for data augmentation
def DNA_complement(sequence):
    trantab = str.maketrans('ACGTacgt', 'TGCAtgca') #translation table
    com_sequence = sequence[::-1].translate(trantab) # reverse, then complement
return com_sequence
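# Quick sanity check (illustrative): DNA_complement('AACG') returns 'CGTT',
# i.e. the reverse complement, and lower-case input stays lower-case.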
#Get the raw sequences instead of one-hot encoded ones
def get_seq_list(fasta_path,sample_bed_df):
    fasta_file = pysam.FastaFile( fasta_path) #open the fasta file; a .fai index must exist next to it
seq_=[]
for index,data in sample_bed_df.iterrows():
seq_.append(fasta_file.fetch(data.chr,data.start,data.end))
return seq_
#For classification, the 4th bed column must be provided as the class label!
def genome_dataset(bed_path,fasta_path,seq_len=1024,genome_size_control=None,dataset_type='regression',bw_path=None,Data_Augmentation=False):
    #Mind chromosome naming (chr01 vs chr1, leading zeros, upper/lower case) and keep it consistent with the fasta and bw files
    #Fault tolerance (reading files with or without a header) is still to be implemented
    with open(bed_path,'r')as f:
        #first field of the first line (should be chrom or chrXX)
        chek_bed = f.readline().strip().split('\t')[0]
        if chek_bed[:3]!='chr' and chek_bed[:3]!='CHR' and chek_bed[:3]!='Chr':
            raise IOError("The bed file is not in the expected format, please check!\n Note: columns must be tab separated and there must be no header line")
        else:
            print('bed file check passed')
if dataset_type=='regression' or dataset_type=='base_regression':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:3]
bed_df.columns = ['chr','start','end']
        #get fixed-length bed intervals
new_bed_df = get_new_bed_df(bed_df,seq_len)
elif dataset_type=='classification':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:4]
bed_df.columns = ['chr','start','end','classes']
new_bed_df = get_new_bed4_df(bed_df,seq_len)
else:
raise Exception("请选择正确的模式")
#样本控制
if genome_size_control!=None:
print('使用控制文件,将过滤起止位点不合文件要求的序列和未在文件中出现的染色体对应的序列')
genome_control = pd.read_csv(genome_size_control,sep='\t',names=['chr','control'])
control_bed_df = sample_control(new_bed_df,genome_control)
else:
control_bed_df = new_bed_df
        print('You chose not to use a control file')
    #one-hot encode the original sequences
    seq = get_one_hot_seq_list(fasta_path,control_bed_df)
    sample_name = []
    #build sample_name for the original samples
for k in range(len(control_bed_df)):
sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2]))
if dataset_type=='classification':
if Data_Augmentation:
            print('Data augmentation via reverse-complement sequences: this step will double the final number of samples')
            seq_ = get_seq_list(fasta_path,control_bed_df)
            reverse_com_seq = list(map(DNA_complement , seq_))
            #reverse_com_seq is one-hot encoded here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
classes_all = list(control_bed_df.classes)+list(control_bed_df.classes)
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'classes':classes_all})
return dataset_df
else:
            print('You chose not to use data augmentation')
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'classes':list(control_bed_df.classes)})
return dataset_df
if dataset_type=='regression':
if Data_Augmentation:
            print('Data augmentation via reverse-complement sequences: this step will double the final number of samples')
            #map returns an iterator, so wrap it in list()
            #build the reverse-complement sequences
            #note: the input here must be the raw sequences, not the one-hot encoded seq
            seq_ = get_seq_list(fasta_path,control_bed_df)
            reverse_com_seq = list(map(DNA_complement , seq_))
            #reverse_com_seq is one-hot encoded here
            seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
            bw_regression = get_regression(bw_path,control_bed_df)
            print('Extracting the signal for the reverse-complement sequences')
bw_regression_all = bw_regression+bw_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_regression_all})
return dataset_df
else:
            print('You chose not to use data augmentation')
bw_regression = get_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_regression})
return dataset_df
if dataset_type=='base_regression':
if Data_Augmentation:
            print('Data augmentation via reverse-complement sequences: this step will double the final number of samples')
            #map returns an iterator, so wrap it in list()
            #build the reverse-complement sequences
            #note: the input here must be the raw sequences, not the one-hot encoded seq
            seq_ = get_seq_list(fasta_path,control_bed_df)
            reverse_com_seq = list(map(DNA_complement , seq_))
            #reverse_com_seq is one-hot encoded here
            seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
            bw_base_regression = get_base_regression(bw_path,control_bed_df)
            print('Extracting the signal for the reverse-complement sequences')
bw_base_regression_all = bw_base_regression+bw_base_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_base_regression_all})
return dataset_df
else:
            print('You chose not to use data augmentation')
bw_base_regression = get_base_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_base_regression})
return dataset_df
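#Illustrative usage sketch (kept as a comment; the file names below are placeholders, not shipped with the package):
#  df = genome_dataset('peaks.bed', 'genome.fa', seq_len=1024,
#                      genome_size_control='genome.chrom.sizes',
#                      dataset_type='classification', Data_Augmentation=True)
#  df.seq_one_hot[0].shape  # -> (1024, 4); 'classes' holds the labels from the 4th bed column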
def get_torch_pfm(bed_file_path,fasta_file_path,model_path,weight_name='module.model.conv_net.0.weight',
filter_size=8,channel_num=320,seq_len=1000,
genome_size_control_path=None,lim__=0.75,data_batch=320,all_batch=45,stochastic_control=False):
    print('The threshold is set to '+str(lim__*100)+'% of the maximum possible activation')
    model_analysis = torch.load(model_path)
    #inspect the keys of the state dict (i.e. which layers the network has):
    #model_analysis['state_dict'].keys()
    weight0 = model_analysis['state_dict'][weight_name]
    #this could also be exposed as a parameter
motif_ocr_seq = genome_dataset(bed_file_path,fasta_file_path,seq_len=seq_len,
genome_size_control=genome_size_control_path,dataset_type='classification',bw_path=None,Data_Augmentation=False)
    #torch keeps the channel dimension in a different position than tf
    #several axis swaps below convert everything to the tf layout before building the PFM matrices
    motif_ocr_hot = np.array(list(motif_ocr_seq.seq_one_hot)).swapaxes(1,2)
    ocr_tensor = torch.from_numpy(motif_ocr_hot).cuda().float()
    # define the conv layer; the number of kernels must match the number of filters in the trained model
    w=torch.nn.Conv1d(4,channel_num,filter_size).cuda()
    # assign the tensor as the layer weights; it has to be wrapped in torch.nn.Parameter first
w.weight=torch.nn.Parameter(weight0)
conv_list = []
for i in range(all_batch):
        #keep the original order
comput_ocr = ocr_tensor[int(i*data_batch):int((i+1)*data_batch)]
conv_out = w(comput_ocr)
conv_out=torch.Tensor.cpu(conv_out).detach().numpy()
conv_list.append(conv_out)
conv_array = np.array(conv_list)
final_conv_out = np.concatenate(conv_array,axis=0).swapaxes(1,2)
weight_use = torch.Tensor.cpu(weight0).detach().numpy()
weight_use = weight_use.swapaxes(0,2)
filter_all_position = []
for i in range(weight_use.shape[2]):
filter_position = []
for j in range(final_conv_out.shape[0]):
one_filter_weight = weight_use[:,:,i]
one_filter_scan_out = final_conv_out[j,:,i]
MAX = np.sum(np.max(one_filter_weight,1))
position = list(np.where(one_filter_scan_out >= MAX*lim__))
            #after the loop, filter_position holds all activated positions of one filter
            #empty entries are kept as well, so that the sequence index stays aligned
            filter_position.append(position)
        filter_all_position.append(filter_position)
    #dtype=object because the per-sequence position lists have different lengths (ragged)
    filter_all_position=np.array(filter_all_position,dtype=object)
    #tensor to numpy, swap axes
ocr_numpy = ocr_tensor.detach().cpu().numpy()
ocr_numpy= ocr_numpy.swapaxes(1,2)
    #collect the corresponding sequence fragments
    filter_seq=[]
    #first loop level: iterate over the filters
    for i in filter_all_position:
        seq_ = []
        #second loop level: iterate over the sequences
        for j in range(len(i)):
            if len(i[j][0])>=1:
                for k in i[j][0]:
                    #note: the slice length corresponds to filter_size
seq_.append(ocr_numpy[j,k:k+filter_size,:])
filter_seq.append(seq_)
filter_seq_used = []
for i in filter_seq:
if stochastic_control==True:
if len(i)>= (seq_len-filter_size+1)*data_batch*all_batch*((0.25)**(np.floor(filter_size*lim__))):
filter_seq_used.append(i)
if stochastic_control==False:
if len(i)>= 10:
filter_seq_used.append(i)
final_pfm = []
for i in filter_seq_used:
final_pfm.append(np.sum(np.array(i),axis=0).T)
    print('Obtained '+str(len(final_pfm))+' PFM matrices whose number of activating sequences is above the specified cutoff')
    return final_pfm
#the input is expected to be an array
def get_meme_input_file(pfms_,meme_path):
pfms_ = np.array(pfms_)
ppm = []
for i in pfms_:
ppm.append(i/np.sum(i,axis=0))
with open(meme_path,'w')as f:
f.write('MEME version 5.3.3')
f.write('\n')
f.write('\n')
f.write('ALPHABET = ACGT')
f.write('\n')
f.write('\n')
f.write('strands: + -')
f.write('\n')
f.write('\n')
f.write('Background letter frequencies')
f.write('\n')
f.write('A 0.25 C 0.25 G 0.25 T 0.25')
f.write('\n')
f.write('\n')
for i in range(len(ppm)):
f.write('MOTIF\tmotif_ppm'+str(i))
f.write('\n')
f.write('letter-probability matrix: alength= 4 w= '+str(len(ppm[i].T)))
f.write('\n')
for j in ppm[i].T:
f.write(str(j[0])+'\t'+str(j[1])+'\t'+str(j[2])+'\t'+str(j[3]))
f.write('\n')
f.write('\n')
    print('The result file has been saved to '+meme_path+'\n'+'It is in the input format required by the MEME tomtom tool')
def get_torch_motif(meme_path,bed_file_path,fasta_file_path,model_path,weight_name='module.model.conv_net.0.weight',
filter_size=8,channel_num=320,seq_len=1000,
genome_size_control_path=None,lim__=0.75,data_batch=320,all_batch=45,stochastic_control=False):
get_meme_input_file(get_torch_pfm(bed_file_path,fasta_file_path,model_path,weight_name=weight_name,
filter_size=filter_size,channel_num=channel_num,seq_len=seq_len,
                       genome_size_control_path=genome_size_control_path,lim__=lim__,data_batch=data_batch,all_batch=all_batch,stochastic_control=stochastic_control),meme_path=meme_path) | zzx-deep-genome | /zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/cnn_torch_motif_1d.py | cnn_torch_motif_1d.py
import numpy as np
import wget
import pandas as pd
from random import randint, sample
#Fetch PFMs (position frequency matrices) from JASPAR or from a local file
def get_pfm(taxonomic_groups=str('plants'),data_local = None):
if data_local == None:
if taxonomic_groups=='plants':
DATA_URL = 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_plants_non-redundant_pfms_jaspar.txt'
out_fname='./plants.txt'
wget.download(DATA_URL, out=out_fname)
pre_pfm = pd.read_csv('./plants.txt',
sep='\t',
header=None)
elif taxonomic_groups=='fungi':
DATA_URL = 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_fungi_non-redundant_pfms_jaspar.txt'
out_fname='./plants.txt'
wget.download(DATA_URL, out=out_fname)
pre_pfm = pd.read_csv('./plants.txt',
sep='\t',
header=None)
elif taxonomic_groups=='vertebrates':
DATA_URL = 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_vertebrates_non-redundant_pfms_jaspar.txt'
out_fname='./plants.txt'
wget.download(DATA_URL, out=out_fname)
pre_pfm = pd.read_csv('./plants.txt',
sep='\t',
header=None)
elif taxonomic_groups=='insects':
DATA_URL = 'http://jaspar.genereg.net/download/CORE/JASPAR2020_CORE_insects_non-redundant_pfms_jaspar.txt'
out_fname='./plants.txt'
wget.download(DATA_URL, out=out_fname)
pre_pfm = pd.read_csv('./plants.txt',
sep='\t',
header=None)
else:
        #accept a file downloaded from JASPAR as input
pre_pfm = pd.read_csv(str(data_local),
sep='\t',
header=None)
pfm = []
for i in range(0,len(pre_pfm),5):
pfm_sample = []
for j in range(i+1,i+5):
str_pfm = pre_pfm.iloc[j,0][4:-1].strip().split()
int_pfm = [int(k) for k in str_pfm]
pfm_sample.append(np.array(int_pfm))
pfm.append(np.array(pfm_sample).astype('float32'))
    #return a list of PFM arrays, each with shape 4*L (L is the motif length)
print('There are '+str(len(pfm))+ ' PFMs '+str('!'))
print('You need to consider whether the number of CNN filters you use is suitable for this initialization method.')
return pfm
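#Illustrative usage (kept as a comment): get_pfm('plants') downloads the JASPAR 2020 plant set
#(network access required) and returns a list of 4 x L numpy arrays; alternatively pass a
#previously downloaded JASPAR file via data_local, e.g. get_pfm(data_local='./plants.txt').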
#Convert the PFMs into PPMs (position probability matrices)
def get_ppm(pfm_ ):
ppm = []
for k in pfm_:
ppm.append(k)
for i in range(len(ppm)):
for j in range(len(ppm[i][0])):
a = ppm[i][0][j] / (ppm[i][0][j] + ppm[i][1][j] +
ppm[i][2][j] + ppm[i][3][j])
b = ppm[i][1][j] / (ppm[i][0][j] + ppm[i][1][j] +
ppm[i][2][j] + ppm[i][3][j])
c = ppm[i][2][j] / (ppm[i][0][j] + ppm[i][1][j] +
ppm[i][2][j] + ppm[i][3][j])
d = ppm[i][3][j] / (ppm[i][0][j] + ppm[i][1][j] +
ppm[i][2][j] + ppm[i][3][j])
ppm[i][0][j] = a
ppm[i][1][j] = b
ppm[i][2][j] = c
ppm[i][3][j] = d
return ppm
#Compute the Shannon entropy of one position (lower = more informative); 1e-5 is added to avoid zeros
def compute_Information_entropy(acgt):
return -1*np.sum(np.log2(np.array(acgt)+1e-5)* (np.array(acgt)+1e-5))
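#Worked example: a uniform position [0.25, 0.25, 0.25, 0.25] gives ~2 bits (the maximum for
#4 letters), while a fully conserved position such as [1, 0, 0, 0] gives a value close to 0,
#so a lower total over a window means a more informative motif segment.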
#Get a set of fixed-length PPMs
def get_ppm_L(ppm,L_ = 8):
ppm_L = []
num_drop = 0
num_L = 0
f = lambda x: compute_Information_entropy(x)
for i in ppm:
        #if the motif is shorter than the specified length, drop it
        if len(i[0])<L_:
            num_drop+=1
        #exactly the specified length: keep the whole motif
elif len(i[0])==L_:
ppm_L.append(i)
num_L+=1
        #longer than the specified length: keep the most informative L-mer (the window with the lowest total entropy)
        else:
            min_Information_entropy = float('inf')
            final_number = 0
            #slide a window of length L_ over the positions (i has shape 4 x L, so the positions are i.T)
            for j in range(0,len(i[0])-L_+1):
                if np.sum([f(a) for a in i.T[j:j+L_]])<min_Information_entropy:
                    min_Information_entropy = np.sum([f(a) for a in i.T[j:j+L_]])
                    final_number = j
                else:
                    pass
            ppm_L.append(i[:,final_number:final_number+L_])
print(str(num_drop)+ ' PFMs with lengths less than the specified length have been screened out.')
print( 'All '+str(num_L) + ' PFMs of length exactly equal to the specified length are retained.')
    print('For PFMs longer than the specified length, the most informative segment (the window with the lowest total entropy) is kept.')
return ppm_L
#Get fixed-length PPMs with 0.25 subtracted from every entry, so the values are roughly zero-centred
def get_ppm_rp25_L(ppm,L_ = 8):
ppm_L = []
num_drop = 0
num_L = 0
f = lambda x: compute_Information_entropy(x)
for i in ppm:
        #if the motif is shorter than the specified length, drop it
        if len(i[0])<L_:
            num_drop+=1
        #exactly the specified length: keep the whole motif
elif len(i[0])==L_:
ppm_L.append(i-0.25)
num_L+=1
        #longer than the specified length: keep the most informative L-mer (the window with the lowest total entropy)
        else:
            min_Information_entropy = float('inf')
            final_number = 0
            #slide a window of length L_ over the positions (i has shape 4 x L, so the positions are i.T)
            for j in range(0,len(i[0])-L_+1):
                if np.sum([f(a) for a in i.T[j:j+L_]])<min_Information_entropy:
                    min_Information_entropy = np.sum([f(a) for a in i.T[j:j+L_]])
                    final_number = j
                else:
                    pass
            ppm_L.append(i[:,final_number:final_number+L_]-0.25)
print(str(num_drop)+ ' PFMs with lengths less than the specified length have been screened out.')
print( 'All '+str(num_L) + ' PFMs of length exactly equal to the specified length are retained.')
    print('For PFMs longer than the specified length, the most informative segment (the window with the lowest total entropy) is kept.')
return ppm_L
def get_pwm(ppm_L,background_acgt = [0.25,0.25,0.25,0.25]):
pwm_L = []
for i in range(len(ppm_L)):
pwm_L_sample = []
for j in range(4):
            #a pseudo-count (1e-2) is added to avoid zeros in the result
            #caveat: positive values are capped around 2, while negative values can become very large negative numbers (towards -inf)
            #consider changing this, or simply using the PPM instead?
            #this may cause some problems
pwm_L_sample.append(list(np.log2((ppm_L[i][j]+1e-2)/background_acgt[j])))
pwm_L.append(np.array(pwm_L_sample))
return np.array(pwm_L)
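#Illustrative pipeline sketch (kept as a comment, reusing the helpers above):
#  pfm  = get_pfm('plants')
#  ppm8 = get_ppm_L(get_ppm(pfm), L_=8)
#  pwm8 = get_pwm(ppm8)            # numpy array of shape (n_motifs, 4, 8)
#With the default uniform background the values are log2((p + 1e-2) / 0.25), i.e. roughly
#between -4.7 and 2.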
def filter_initialization_matrix(taxonomic_groups='plants',data_local = None,
filters=64,
L_=8,
pattern='ppm_rp25',
background_acgt=[0.25, 0.25, 0.25, 0.25]):
print('Note that the base order in the return result matrix is ACGT')
    #only the pwm mode needs background_acgt
if pattern == 'ppm_rp25':
print(
            'You will get the PPM_R25 matrices (0.25 is subtracted from every position of the PPM matrix) with the specified number and length.'
)
pfm = get_pfm(taxonomic_groups,data_local )
ppm = get_ppm(pfm)
ppm_L = get_ppm_rp25_L(ppm, L_)
sample_number = [randint(0, len(ppm_L) - 1) for _ in range(filters)]
ppm_r25_filters = []
for i in sample_number:
ppm_r25_filters.append(ppm_L[i])
print('You will get the numpy array with shape ',
str(np.array(ppm_r25_filters).shape))
print(
"You can use numpy's swaaxes function to make the dimension transformation suitable for initializing your parameters"
)
return np.array(ppm_r25_filters)
elif pattern == 'ppm':
print(
'You will get the PPM matrix with the specified number and length.'
)
pfm = get_pfm(taxonomic_groups,data_local )
ppm = get_ppm(pfm)
ppm_L = get_ppm_L(ppm, L_)
sample_number = [randint(0, len(ppm_L)-1) for _ in range(filters)]
ppm_filters = []
for i in sample_number:
ppm_filters.append(ppm_L[i])
print('You will get the numpy array with shape ',
str(np.array(ppm_filters).shape))
print(
"You can use numpy's swaaxes function to make the dimension transformation suitable for initializing your parameters"
)
return np.array(ppm_filters)
elif pattern == 'pwm':
print(
'You will get the PWM matrix of the specified length and the specified number calculated with '
+ str(background_acgt) + ' as the background.',
'To prevent negative infinity, the value of 1e-2 is added to all positions.'
)
pfm = get_pfm(taxonomic_groups,data_local )
ppm = get_ppm(pfm)
ppm_L = get_ppm_L(ppm, L_)
pwm = get_pwm(ppm_L, background_acgt)
sample_number = [randint(0, len(pwm)-1) for _ in range(filters)]
pwm_filters = []
for i in sample_number:
pwm_filters.append(pwm[i])
print('You will get the numpy array with shape ',
str(np.array(pwm_filters).shape))
print(
"You can use numpy's swaaxes function to make the dimension transformation suitable for initializing your parameters"
)
return np.array(pwm_filters) | zzx-deep-genome | /zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/filter_initialization.py | filter_initialization.py |
import os
import re
import numpy as np
import pandas as pd
import tensorflow as tf
import time #built-in module
import pysam
import pyBigWig
from pysam import FastaFile
from scipy.ndimage import gaussian_filter1d
#Helper function for one-hot encoding
#Encoding 1,000,000 sequences of length 1000 with this function takes about 700 seconds (GPU02 node)
def one_hot_dna(dna):
dna_dict={'A':[1.0,0.,0.,0.],'C':[0.,1.0,0.,0.],'G':[0.,0.,1.0,0.],'T':[0.,0.,0.,1.0],'N':[0.,0.,0.,0.],
'a':[1.0,0.,0.,0.],'c':[0.,1.0,0.,0.],'g':[0.,0.,1.0,0.],'t':[0.,0.,0.,1.0],'n':[0.,0.,0.,0.]}
return np.array([dna_dict[k] for k in dna])
#Helper function: center/trim each region so that a fixed-length sequence is obtained
def get_new_bed_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
    #Up to this point the original bed file structure has not been modified
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
def get_new_bed4_df(bed_df , seq_len):
diff = np.array(bed_df.end - bed_df.start)-seq_len
new_start_list = list(np.rint(np.array(bed_df.start) + np.floor(diff*0.5)))
new_end_list = list(np.rint(np.array(bed_df.end) - np.ceil(diff*0.5)))
    #Up to this point the original bed file structure has not been modified
new_bed3_df = pd.DataFrame({ 'chr':bed_df.chr,'start':new_start_list,'end':new_end_list,'classes':bed_df.classes})
new_bed3_df.start = new_bed3_df.start.astype(int)
new_bed3_df.end = new_bed3_df.end.astype(int)
return new_bed3_df
#Filter samples according to the control file (chromosome name and length, tab separated)
def sample_control(bed_df,genome_control_df):
    #The Python built-in locals() returns a dict of all current local variables
    #It is used here to create local variables dynamically
ld = locals()
for i in range(len(genome_control_df)):
bed_chr_ = bed_df[bed_df.chr == genome_control_df.iloc[i,0]]
bed_chr = bed_chr_[bed_chr_.end<genome_control_df.iloc[i,1]]
ld['sample_' + str(genome_control_df.iloc[i,0])] =bed_chr[bed_chr.start>0]
bed_df_all = []
for j in genome_control_df.chr:
bed_df_all.append(ld['sample_' + j])
bed_df_control = pd.concat(bed_df_all,axis=0)
    print(len(bed_df)-len(bed_df_control),'samples were filtered out because their chromosome is not in the control file or they exceed the bounds defined there')
return bed_df_control
#The second argument takes a bed DataFrame rather than the raw bed file, i.e. this is a helper, not a standalone function
#One-hot encoding 100,000 sequences of length 1024 takes roughly 91 seconds
def get_one_hot_seq_list(fasta_path,sample_bed_df):
    print('One-hot encoding',str(len(sample_bed_df)),'sequences')
    start_time = time.time()
    fasta_file = pysam.FastaFile( fasta_path) #open the fasta file; a .fai index must exist next to it
seq=[]
for index,data in sample_bed_df.iterrows():
seq.append(one_hot_dna(fasta_file.fetch(data.chr,data.start,data.end) ))
end_time = time.time()
    print('One-hot encoding finished, this step took',end_time-start_time,'seconds')
    return seq
def get_regression(bw_path,sample_bed_df):
    print('Extracting signal for',str(len(sample_bed_df)),'samples')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_regression = []
for index,data in sample_bed_df.iterrows():
        #bw.stats returns the mean value over the interval
bw_regression.append(bw_file.stats(str(data.chr),int(data.start),int(data.end))[0])
bw_file.close()
end_time = time.time()
    print('Signal extraction finished, this step took',end_time-start_time,'seconds')
    return bw_regression
def get_base_regression(bw_path,sample_bed_df):
    print('Extracting base-resolution signal for',str(len(sample_bed_df)),'samples')
start_time = time.time()
bw_file = pyBigWig.open(bw_path)
bw_base_regression = []
for index,data in sample_bed_df.iterrows():
        #bw.values returns the value at every base in the interval
bw_base_regression.append(bw_file.values(str(data.chr),int(data.start),int(data.end)))
bw_file.close()
end_time = time.time()
    print('Signal extraction finished, this step took',end_time-start_time,'seconds')
    return bw_base_regression
#Get the reverse-complement sequence, used for data augmentation
def DNA_complement(sequence):
    trantab = str.maketrans('ACGTacgt', 'TGCAtgca') #translation table
    com_sequence = sequence[::-1].translate(trantab) # reverse, then complement
return com_sequence
#Get the raw sequences instead of one-hot encoded ones
def get_seq_list(fasta_path,sample_bed_df):
    fasta_file = pysam.FastaFile( fasta_path) #open the fasta file; a .fai index must exist next to it
seq_=[]
for index,data in sample_bed_df.iterrows():
seq_.append(fasta_file.fetch(data.chr,data.start,data.end))
return seq_
#For classification, the 4th bed column must be provided as the class label!
def genome_dataset(bed_path,fasta_path,seq_len=1024,genome_size_control=None,dataset_type='regression',bw_path=None,Data_Augmentation=False):
    #Mind chromosome naming (chr01 vs chr1, leading zeros, upper/lower case) and keep it consistent with the fasta and bw files
    #Fault tolerance (reading files with or without a header) is still to be implemented
    with open(bed_path,'r')as f:
        #first field of the first line (should be chrom or chrXX)
        chek_bed = f.readline().strip().split('\t')[0]
        if chek_bed[:3]!='chr' and chek_bed[:3]!='CHR' and chek_bed[:3]!='Chr':
            raise IOError("The bed file is not in the expected format, please check!\n Note: columns must be tab separated and there must be no header line")
        else:
            print('bed file check passed')
if dataset_type=='regression' or dataset_type=='base_regression':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:3]
bed_df.columns = ['chr','start','end']
        #get fixed-length bed intervals
new_bed_df = get_new_bed_df(bed_df,seq_len)
elif dataset_type=='classification':
bed_df = pd.read_csv(bed_path,sep='\t',header=None).iloc[:,:4]
bed_df.columns = ['chr','start','end','classes']
new_bed_df = get_new_bed4_df(bed_df,seq_len)
else:
raise Exception("请选择正确的模式")
#样本控制
if genome_size_control!=None:
print('使用控制文件,将过滤起止位点不合文件要求的序列和未在文件中出现的染色体对应的序列')
genome_control = pd.read_csv(genome_size_control,sep='\t',names=['chr','control'])
control_bed_df = sample_control(new_bed_df,genome_control)
else:
control_bed_df = new_bed_df
        print('You chose not to use a control file')
    #one-hot encode the original sequences
    seq = get_one_hot_seq_list(fasta_path,control_bed_df)
    sample_name = []
    #build sample_name for the original samples
for k in range(len(control_bed_df)):
sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2]))
if dataset_type=='classification':
if Data_Augmentation:
            print('Data augmentation via reverse-complement sequences: this step will double the final number of samples')
            seq_ = get_seq_list(fasta_path,control_bed_df)
            reverse_com_seq = list(map(DNA_complement , seq_))
            #reverse_com_seq is one-hot encoded here
seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
classes_all = list(control_bed_df.classes)+list(control_bed_df.classes)
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'classes':classes_all})
return dataset_df
else:
            print('You chose not to use data augmentation')
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'classes':list(control_bed_df.classes)})
return dataset_df
if dataset_type=='regression':
if Data_Augmentation:
            print('Data augmentation via reverse-complement sequences: this step will double the final number of samples')
            #map returns an iterator, so wrap it in list()
            #build the reverse-complement sequences
            #note: the input here must be the raw sequences, not the one-hot encoded seq
            seq_ = get_seq_list(fasta_path,control_bed_df)
            reverse_com_seq = list(map(DNA_complement , seq_))
            #reverse_com_seq is one-hot encoded here
            seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
            bw_regression = get_regression(bw_path,control_bed_df)
            print('Extracting the signal for the reverse-complement sequences')
bw_regression_all = bw_regression+bw_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_regression_all})
return dataset_df
else:
            print('You chose not to use data augmentation')
bw_regression = get_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_regression})
return dataset_df
if dataset_type=='base_regression':
if Data_Augmentation:
            print('Data augmentation via reverse-complement sequences: this step will double the final number of samples')
            #map returns an iterator, so wrap it in list()
            #build the reverse-complement sequences
            #note: the input here must be the raw sequences, not the one-hot encoded seq
            seq_ = get_seq_list(fasta_path,control_bed_df)
            reverse_com_seq = list(map(DNA_complement , seq_))
            #reverse_com_seq is one-hot encoded here
            seq_all = seq+list(map(one_hot_dna,reverse_com_seq))
            bw_base_regression = get_base_regression(bw_path,control_bed_df)
            print('Extracting the signal for the reverse-complement sequences')
bw_base_regression_all = bw_base_regression+bw_base_regression
m_sample_name = []
for k in range(len(control_bed_df)):
m_sample_name.append(str(control_bed_df.iloc[k,0])+','+str(control_bed_df.iloc[k,1])+','+str(control_bed_df.iloc[k,2])+','+'Reverse_complementation')
sample_name_all = sample_name+m_sample_name
dataset_df = pd.DataFrame({'sample':sample_name_all,'seq_one_hot':seq_all,'target':bw_base_regression_all})
return dataset_df
else:
            print('You chose not to use data augmentation')
bw_base_regression = get_base_regression(bw_path,control_bed_df)
dataset_df = pd.DataFrame({'sample':sample_name,'seq_one_hot':seq,'target':bw_base_regression})
return dataset_df
#Extract the weights of the model's first convolutional layer
def get_tf_conv0_weight(model_path,weight_name):
tf1_model_best_path = model_path
reader = tf.compat.v1.train.NewCheckpointReader(tf1_model_best_path)
var_to_shape_map = reader.get_variable_to_shape_map()
    '''
    #this snippet can be used to find the name of the first conv layer
    for key in var_to_shape_map:
        print("tensor name: ", key)
        print(reader.get_tensor(key)) # print the tensor values
    '''
    #trained weights of the first basenji conv layer
cnn0_weight = reader.get_tensor(weight_name)
return cnn0_weight
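#Illustrative usage (kept as a comment; the checkpoint path and tensor name are placeholders
#that depend on your trained model):
#  w = get_tf_conv0_weight('./model_best.tf.ckpt', 'some_conv_layer/kernel')
#  w.shape  # expected (kernel_size, 4, num_filters) for a 1D conv over one-hot DNA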
#lim__ = 0.75 is the activation threshold
def get_basenji_motif_pfms(bed_file_path,fasta_file_path,model_path,seq_len=1000,kernel_size=22,
weight_name='',weight_ = None,
genome_size_control_path=None,lim__ = 0.75,data_batch=320,all_batch=45,stochastic_control=False):
if weight_ is None:
weight0 = get_tf_conv0_weight(model_path,weight_name)
else:
weight0 = weight_
    print('The threshold is set to '+str(lim__*100)+'% of the maximum possible activation')
    ##load the data (one-hot encoded sequences); this could also be parameterised
motif_ocr_seq = genome_dataset(bed_file_path,fasta_file_path,seq_len=seq_len,
genome_size_control=genome_size_control_path,dataset_type='classification',bw_path=None,Data_Augmentation=False)
motif_ocr_hot = np.array(list(motif_ocr_seq.seq_one_hot))
ocr_tensor = tf.convert_to_tensor(motif_ocr_hot)
ocr_tensor = tf.cast(ocr_tensor, dtype = tf.float32)
    #convolution
    conv_list = []
    for i in range(all_batch):
        #keep the original order
comput_ocr = ocr_tensor[int(i*data_batch):int((i+1)*data_batch)]
conv_out = tf.compat.v1.nn.conv1d(comput_ocr, filters=weight0, padding='VALID').numpy()
conv_list.append(conv_out)
conv_array = np.array(conv_list)
final_conv_out = np.concatenate(conv_array,axis=0)
    #collect the activated positions
filter_all_position = []
for i in range(weight0.shape[2]):
filter_position = []
for j in range(final_conv_out.shape[0]):
one_filter_weight = weight0[:,:,i]
one_filter_scan_out = final_conv_out[j,:,i]
MAX = np.sum(np.max(one_filter_weight,1))
position = list(np.where(one_filter_scan_out >= MAX*lim__))
            #after the loop, filter_position holds all activated positions of one filter
            #empty entries are kept as well, so that the sequence index stays aligned
filter_position.append(position)
filter_all_position.append(filter_position)
    #collect the corresponding sequence fragments
    filter_seq=[]
    #first loop level: iterate over the filters
    for i in filter_all_position:
        seq_ = []
        #second loop level: iterate over the sequences
for j in range(len(i)):
if len(i[j][0])>=1:
for k in i[j][0]:
                    #note: the slice length corresponds to kernel_size
seq_.append(ocr_tensor[j,k:k+int(kernel_size),:])
filter_seq.append(seq_)
    #drop filters that matched too few sequences
    #would a 25-75 percentile cutoff work better?
filter_seq_used = []
for i in filter_seq:
if stochastic_control==True:
if len(i)>= (seq_len-kernel_size+1)*data_batch*all_batch*((0.25)**(np.floor(kernel_size*lim__))):
filter_seq_used.append(i)
if stochastic_control==False:
if len(i)>= 10:
filter_seq_used.append(i)
final_pfm = []
for i in filter_seq_used:
final_pfm.append(np.sum(np.array(i),axis=0).T)
    print('Obtained '+str(len(final_pfm))+' PFM matrices whose number of activating sequences is above the specified cutoff')
return final_pfm
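#Illustrative call (kept as a comment; paths and the tensor name are placeholders for a trained
#basenji-style model):
#  pfms = get_basenji_motif_pfms('ocr.bed', 'genome.fa', './model_best.tf.ckpt',
#                                seq_len=1000, kernel_size=22,
#                                weight_name='some_conv_layer/kernel')
#  get_meme_input_file(pfms, './filters.meme')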
#the input is expected to be an array
def get_meme_input_file(pfms_,meme_path):
pfms_ = np.array(pfms_)
ppm = []
for i in pfms_:
ppm.append(i/np.sum(i,axis=0))
with open(meme_path,'w')as f:
f.write('MEME version 5.3.3')
f.write('\n')
f.write('\n')
f.write('ALPHABET = ACGT')
f.write('\n')
f.write('\n')
f.write('strands: + -')
f.write('\n')
f.write('\n')
f.write('Background letter frequencies')
f.write('\n')
f.write('A 0.25 C 0.25 G 0.25 T 0.25')
f.write('\n')
f.write('\n')
for i in range(len(ppm)):
f.write('MOTIF\tmotif_ppm'+str(i))
f.write('\n')
f.write('letter-probability matrix: alength= 4 w= '+str(len(ppm[i].T)))
f.write('\n')
for j in ppm[i].T:
f.write(str(j[0])+'\t'+str(j[1])+'\t'+str(j[2])+'\t'+str(j[3]))
f.write('\n')
f.write('\n')
    print('The result file has been saved to '+meme_path+'\n'+'It is in the input format required by the MEME tomtom tool')
def get_tf_motif(meme_path,bed_file_path,fasta_file_path,model_path,seq_len=1000,kernel_size=22,
weight_name='',weight_ = None,
genome_size_control_path=None,lim__ = 0.75,data_batch=320,all_batch=45,stochastic_control=False):
get_meme_input_file(get_basenji_motif_pfms(bed_file_path,fasta_file_path,model_path,seq_len=seq_len,kernel_size=kernel_size,
weight_name=weight_name,weight_ = weight_,
genome_size_control_path=genome_size_control_path,lim__ = lim__,data_batch=data_batch,all_batch=all_batch,stochastic_control=stochastic_control
),meme_path=meme_path) | zzx-deep-genome | /zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/cnn_tf_motif_1d.py | cnn_tf_motif_1d.py |
import torch
import numpy as np
import torch.nn as nn
#DeeperDeepSEA variant whose final pooling layer collapses the length dimension to 1
class DeeperDeepSEA_pool(nn.Module):
def __init__(self, sequence_length, n_targets):
super(DeeperDeepSEA_pool, self).__init__()
conv_kernel_size = 8
pool_kernel_size = 4
self.conv_net = nn.Sequential(
nn.Conv1d(4, 320, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.Conv1d(320, 320, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.MaxPool1d(
kernel_size=pool_kernel_size, stride=pool_kernel_size),
nn.BatchNorm1d(320),
nn.Conv1d(320, 480, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.Conv1d(480, 480, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.MaxPool1d(
kernel_size=pool_kernel_size, stride=pool_kernel_size),
nn.BatchNorm1d(480),
nn.Dropout(p=0.2),
nn.Conv1d(480, 960, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=conv_kernel_size),
nn.ReLU(inplace=True),
nn.MaxPool1d(
kernel_size=44, stride=44),
nn.BatchNorm1d(960),
nn.Dropout(p=0.2)
)
self.classifier = nn.Sequential(
nn.Linear(960 , n_targets),
nn.ReLU(inplace=True),
nn.BatchNorm1d(n_targets),
nn.Linear(n_targets, n_targets),
nn.Sigmoid())
def forward(self, x):
"""
Forward propagation of a batch.
"""
out = self.conv_net(x)
reshape_out = out.view(out.size(0), 960 )
predict = self.classifier(reshape_out)
return predict
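# Minimal shape check (illustrative): with the kernel and pooling sizes above, a 1000-bp
# one-hot input is reduced to length 1 before the flatten to 960 features, so this variant
# effectively expects sequence_length == 1000.
#   model = DeeperDeepSEA_pool(1000, n_targets=24)
#   model(torch.randn(2, 4, 1000)).shape   # -> torch.Size([2, 24]), values in (0, 1)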
#res_attention_model
import math
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
#residual (bottleneck) block
class Bottleneck(nn.Module):
expansion = 1 #
def __init__(self, inplanes, planes, stride=1, downsample=None,use_1x1conv=False):
super(Bottleneck, self).__init__()
self.conv_1 = nn.Conv1d(inplanes, planes, kernel_size=1, bias=False)
self.bn_1 = nn.BatchNorm1d(planes)
self.conv_2 = nn.Conv1d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn_2 = nn.BatchNorm1d(planes)
self.conv_3 = nn.Conv1d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn_3 = nn.BatchNorm1d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
if use_1x1conv:
self.conv_4 = nn.Conv1d(inplanes, planes,kernel_size = 1, stride=stride)
else:
self.conv_4 = False
self.bn_res = nn.BatchNorm1d(planes)
def forward(self, x):
if self.conv_4:
residual = self.conv_4(x)
residual = self.bn_res(residual)
else:
residual = x
residual = self.bn_res(residual)
out = self.conv_1(x)
out = self.bn_1(out)
out = self.relu(out)
out = self.conv_2(out)
out = self.bn_2(out)
out = self.relu(out)
out = self.conv_3(out)
out = self.bn_3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
#channel attention module
class ChannelAttention(nn.Module):
def __init__(self, in_channel):
super(ChannelAttention, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool1d(1)
self.max_pool = nn.AdaptiveMaxPool1d(1)
self.fc1 = nn.Conv1d(in_channel, in_channel // 16, 1, bias=False)
self.relu1 = nn.ReLU()
self.fc2 = nn.Conv1d(in_channel // 16, in_channel, 1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
out = avg_out + max_out
return self.sigmoid(out)
#spatial attention module
class SpatialAttention(nn.Module):
def __init__(self, kernel_size=3):
super(SpatialAttention, self).__init__()
self.conv1 = nn.Conv1d(2, 1, kernel_size=3, padding=1, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
avg_out = torch.mean(x, dim=1, keepdim=True)
max_out, _ = torch.max(x, dim=1, keepdim=True)
x = torch.cat([avg_out, max_out], dim=1)
x = self.conv1(x)
return self.sigmoid(x)
class RES_ATTENTION(nn.Module):
def __init__(self, sequence_length, n_targets):
super(RES_ATTENTION, self).__init__()
self.conv_h1 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size= 7),
nn.ReLU(inplace=True)
)
self.conv_h2 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size=9,padding=1),
nn.ReLU(inplace=True)
)
self.conv_h3 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size=11,padding=2),
nn.ReLU(inplace=True)
)
self.conv_h4 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size=13,padding=3),
nn.ReLU(inplace=True)
)
self.conv_h5 = nn.Sequential(
nn.Conv1d(4, 32, kernel_size=15,padding=4),
nn.ReLU(inplace=True)
)
        #takes the concatenated multi-channel input
self.ca = ChannelAttention(160)
self.sa = SpatialAttention()
self.conv = nn.Sequential(
Bottleneck(160,160),
Bottleneck(160,320,use_1x1conv=True),
            #reduce the number of channels
nn.Conv1d( 320,160, kernel_size=1),
nn.Conv1d( 160,24, kernel_size=1),
nn.ReLU(inplace=True),
#
nn.Conv1d(24,24,498),
nn.ReLU(inplace=True),
nn.Conv1d(24,24,497),
)
self.classifier = nn.Sequential(
nn.Sigmoid()
)
def forward(self, x):
        #reshape directly; no fully connected layer is used anywhere in the network
out1 = self.conv_h1(x)
out2 = self.conv_h2(x)
out3 = self.conv_h3(x)
out4 = self.conv_h4(x)
out5 = self.conv_h5(x)
out_merge = torch.cat((out1,out2,out3,out4,out5),dim=1)
out_merge_ca = self.ca(out_merge) * out_merge
out_merge_sa = self.sa(out_merge_ca) * out_merge_ca
out_ = self.conv(out_merge_sa)
reshape_out = out_.view(out_.size(0), 24 )
predict = self.classifier(reshape_out)
return predict
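# Minimal shape check (illustrative): the two trailing Conv1d layers with kernel sizes 498 and
# 497 only reduce the length dimension to 1 when the input length is 1000, and the flatten is
# hard-coded to 24 features, so n_targets is effectively unused in this class.
#   model = RES_ATTENTION(1000, n_targets=24)
#   model(torch.randn(2, 4, 1000)).shape   # -> torch.Size([2, 24])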
class more_cnn_dilation(nn.Module):
def __init__(self, sequence_length, n_targets):
super(more_cnn_dilation, self).__init__()
self.conv_h1 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size= 7),
nn.ReLU(inplace=True)
)
self.conv_h2 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size=9,padding=1),
nn.ReLU(inplace=True)
)
self.conv_h3 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size=11,padding=2),
nn.ReLU(inplace=True)
)
self.conv_h4 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size=13,padding=3),
nn.ReLU(inplace=True)
)
self.conv_h5 = nn.Sequential(
nn.Conv1d(4, 64, kernel_size=15,padding=4),
nn.ReLU(inplace=True)
)
self.conv = nn.Sequential(
nn.Conv1d(320, 320, kernel_size=7,stride=4),
nn.ReLU(inplace=True),
nn.BatchNorm1d(320),
nn.Conv1d(320, 480, kernel_size=1),
nn.Conv1d(480, 480, kernel_size=3,dilation=2,padding=1),
nn.ReLU(inplace=True),
nn.Conv1d(480, 480, kernel_size=3,dilation=4,padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(480, 480, kernel_size=3,dilation=8,padding=7),
nn.ReLU(inplace=True),
nn.Conv1d(480, 480, kernel_size=7,stride =4),
nn.ReLU(inplace=True),
nn.BatchNorm1d(480),
nn.Dropout(p=0.2),
nn.Conv1d(480, 960, kernel_size=1),
nn.Conv1d(960, 960, kernel_size=3,dilation=2,padding=1),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=4,padding=3),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=8,padding=7),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=16,padding=15),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=32,padding=31),
nn.ReLU(inplace=True),
nn.Conv1d(960, 960, kernel_size=3,dilation=64,padding=63),
nn.ReLU(inplace=True),
nn.BatchNorm1d(960),
nn.Dropout(p=0.2),
            #reduce the number of channels
nn.Conv1d( 960,480, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv1d( 480,120, kernel_size=1),
nn.Conv1d( 120,24, kernel_size=1),
nn.ReLU(inplace=True),
nn.Conv1d(24,24,47)
)
self.classifier = nn.Sequential(
nn.Sigmoid()
)
def forward(self, x):
        #reshape directly; no fully connected layer is used anywhere in the network
out1 = self.conv_h1(x)
out2 = self.conv_h2(x)
out3 = self.conv_h3(x)
out4 = self.conv_h4(x)
out5 = self.conv_h5(x)
out_merge = torch.cat((out1,out2,out3,out4,out5),dim=1)
out_ = self.conv(out_merge)
reshape_out = out_.view(out_.size(0), 24 )
predict = self.classifier(reshape_out)
return predict | zzx-deep-genome | /zzx_deep_genome-0.1.5-py3-none-any.whl/zzx_deep_genome/torch_model.py | torch_model.py |
=====
zzyzx
=====
Do you believe in the cloud? It's in fact only somebody else's computer.
Those might fail or get hacked.
Do you believe in bug-free software? Nah, it's more likely every now and
then a crash, a bug, a race condition or some other back luck will lead
to data corruption of the things that you work on.
Do you think you'll be able to access your notes in thirty years? It's
likely the data format they're stored in is going to be hard to read.
This is why I store all my notes in my e-mail. It's been there since the
1970s, it's going to be there in the 2050s. MIME and IMAP ensure the
data is more or less plaintext and easily human-readable even without any
tool support. Apple Notes support it on both OS X and iOS. Pure win.
But wait, what about software failure? What if a bug erases my notes or
there's a data center fire and the data restored from a backup is in
a state from two days ago? What about bitrot?
Enter ``zzyzx``.
This is the most primitive backup system ever. Set it up in cron on your
laptop or a server you control and it will create incremental backups
with history between runs (setting up a Mercurial repository). It also
creates useful symlinks to human-readable note titles so you can find
them more easily.
Installation
------------
It requires Python 3.5+ and Click. Just install it from PyPI::
$ pip install zzyzx
$ cat >~/.zzyzx
[server]
host=mail.example.com
user=john@example.com
pass=secret
[backup]
repo_path=~/Notes
ignore_prefix=INBOX.Notes
$ zzyzx backup
Markdown export
---------------
If you installed ``zzyzx[markdown]`` from PyPI, you can also run::
$ zzyzx md
This will generate a list of files that are a textual representation
of the notes' contents. This is useful for exporting Apple Notes into
systems that expect Markdown files, like
`Bear <http://www.bear-writer.com/>`_.
Configure your Markdown support adding a section like the following
to your `.zzyzx` config::
[markdown]
path=~/Dropbox/Notes
extension=.txt
headings=atx
Headings can be "atx" (simple hashes, e.g. ``# Title``), "atx_closed"
(symmetrical hashes, ``# Title #``), or "underlined" (ReST-like, with the
title underlined by ``=`` characters).
Why the name ``zzyzx``?
-----------------------
It's the last place on Earth. It's the end of the world.
Known issues
------------
Don't put the repo path in Dropbox as it doesn't support symlinks and
your other computers will see a lot of duplicate files.
The Markdown export is not perfect because the HTML syntax used by
Apple Notes is pretty strange. I did what I could, tested against a few
hundred notes against macOS Sierra and iOS 10.2 (they are not consistent
between each other either).
Changes
-------
2017.1.0
~~~~~~~~
* the Markdown export update: generally sucks less
* also update the creation and modification date in the Markdown export
* allow customization of the Markdown export file extensions
* allow exporting folder-based hashtags (for example for use with Bear
editor)
2016.6.0
~~~~~~~~
* bugfix: slashes and backslashes weren't properly escaped for title
symlinks
2016.4.1
~~~~~~~~
* backwards incompatible: ``zzyzx`` functionality now available as
``zzyzx backup``
* new functionality: ``zzyzx md`` unpacks .eml into text files and
attachments, translating HTML into Markdown
* bugfix: existing and newly created filenames are normalized to NFD;
existing file tracking won't be so eager to delete files anymore on
OS X
2016.4.0
~~~~~~~~
* first published version
Authors
-------
Glued together by `Łukasz Langa <lukasz@langa.pl>`.
| zzyzx | /zzyzx-2017.1.0.tar.gz/zzyzx-2017.1.0/README.rst | README.rst |
# zzz
Python library that waits until something happens.
## Benefit
You will no longer have to write annoying `while`/`time.sleep()` checks
to wait until a variable is euqal to a certain value.
## Usage
It's real simple.
All you gotta do is just have an import statement:
```
from zzz import z
```
After that, you use the `z` function on any
variable/object/function/method/thing ("VOFMT"). You pass the aforementioned
VOFMT as the first argument (`variable`), a `value` that the VOFMT should be
equal to, and lastly an optional `delay` argument, which determines how long to
wait between the checks for the aforementioned conditional equivalence.
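
For example (an illustrative sketch based on the description above; `server` is a
hypothetical object with a `status` attribute):

```
from zzz import z

# Block until server.status equals "ready", checking every 0.5 seconds.
z(server.status, "ready", delay=0.5)
print("server is ready")
```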
## Author
`zzz` was written by David Gay.
## License
AGPLv3+. See `LICENSE` file for full text.
## A note about formatting
I believe that Markdown is superior to ReStructured Text and do not care that
PyPI only parses ReStructured Text. You will have to deal with it. You are a
smart person. Crack the readme open in your text editor, toss it through a
Markdown renderer, or deal with it. You will have many worse moments before
your continual decay leads to your inevitable final breath and the collection
of atoms called "you" disperse and take their place within other beings.
| zzz | /zzz-0.0.2.tar.gz/zzz-0.0.2/README.md | README.md |
import sys
import os
import re
import time
import copy
from threading import Thread
from collections import OrderedDict
from sgtn_properties import Properties
from sgtn_util import FileUtil, NetUtil, SysUtil
from sgtn_util import LOG_TYPE_INFO, KEY_RESULT, KEY_HEADERS
from sgtn_bykey import SingletonByKey
from sgtn_locale import SingletonLocaleUtil
from sgtn_py_base import SgtnException
from I18N import Config, Release, Translation
KEY_LOCALE = 'locale'
KEY_SOURCE = 'source'
KEY_ITEMS = 'format_items'
KEY_RESPONSE = 'response'
KEY_CODE = 'code'
KEY_DATA = 'data'
KEY_MESSAGES = 'messages'
KEY_PRODUCT = 'product'
KEY_VERSION = 'l10n_version'
KEY_SERVICE_URL = 'online_service_url'
KEY_OFFLINE_URL = 'offline_resources_base_url'
KEY_LOCAL_PATH = 'offline_resources_path'
KEY_DEFAULT_LOCALE = 'default_locale'
KEY_SOURCE_LOCALE = 'source_locale'
KEY_TRYDELAY = 'try_delay'
KEY_INTERVAL = 'cache_expired_time'
KEY_CACHEPATH = 'cache_path'
KEY_CACHETYPE = 'cache_type'
KEY_LOGPATH = 'log_path'
KEY_COMPONENTS = 'components'
KEY_LOCALES = 'locales'
KEY_LANG_TAG = 'language_tag'
KEY_COMPONENT_TAG = 'name'
KEY_COMPONENT_TEMPLATE = "component_template"
KEY_LOCALES_REFER = "locales_refer"
KEY_TEMPLATE = "template"
HEADER_REQUEST_ETAG = "If-None-Match"
LOCALE_DEFAULT = 'en-US'
MAX_THREAD = 1000
LOCAL_TYPE_FILE = 'file'
LOCAL_TYPE_HTTP = 'http'
RES_TYPE_PROPERTIES = '.properties'
RES_TYPE_SGTN = '.json'
class ClientUtil:
@classmethod
def check_response_valid(cls, dict):
if dict and KEY_RESULT in dict:
status = dict[KEY_RESULT].get(KEY_RESPONSE)
if status and KEY_CODE in status:
code = status[KEY_CODE]
if code == 200 or code == 604:
return True
return False
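    # Minimal shape check (illustrative; the literal key strings come from sgtn_util and the
    # service response schema):
    #   ClientUtil.check_response_valid({KEY_RESULT: {KEY_RESPONSE: {KEY_CODE: 200}}})  # True
    #   Any code other than 200 or 604, or a missing block, yields False.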
@classmethod
def read_resource_files(cls, local_type, file_list):
props = OrderedDict()
try:
for prop_file in file_list:
if prop_file.endswith(RES_TYPE_PROPERTIES):
text = None
if local_type == LOCAL_TYPE_HTTP:
text = NetUtil.http_get_text(prop_file)
else:
text = FileUtil.read_text_file(prop_file)
if text:
m = Properties().parse(text)
props.update(m)
elif prop_file.endswith(RES_TYPE_SGTN):
m = None
if local_type == LOCAL_TYPE_HTTP:
code, dt = NetUtil.http_get(prop_file, None)
if code == 200:
m = dt.get(KEY_RESULT)
else:
m = FileUtil.read_json_file(prop_file)
if m:
m = m.get(KEY_MESSAGES)
props.update(m)
except Exception as error:
raise IOError('Error in loading property file. Check file(s) = ', file_list, ' ', error)
return props
class SingletonConfig(Config):
def __init__(self, base_path, config_data):
self.base = base_path
self.config_data = config_data
self.product = config_data.get(KEY_PRODUCT)
self.version = '{0}'.format(config_data.get(KEY_VERSION))
self.remote_url = config_data.get(KEY_SERVICE_URL)
self.local_url = config_data.get(KEY_OFFLINE_URL)
if self.local_url:
parts = self.local_url.split('/')
self.local_type = parts[0][:-1]
if self.local_type == LOCAL_TYPE_FILE:
start = 2
needBasePath = False
if len(parts) > 3:
if parts[3] == '..' or parts[3] == '.':
start = 3
needBasePath = True
if parts[3].endswith(':'):
start = 3
self.local_url = '/'.join(parts[start:])
if needBasePath:
self.local_url = os.path.join(base_path, self.local_url)
self.log_path = self.get_path(KEY_LOGPATH) # log path
self.cache_path = self.get_path(KEY_CACHEPATH) # cache path
self.cache_type = self.get_item(KEY_CACHETYPE, 'default') # cache type
self.cache_expired_time = self.get_item(KEY_INTERVAL, 3600) # cache expired time
self.try_delay = self.get_item(KEY_TRYDELAY, 10) # try delay
self.default_locale = self.get_item(KEY_DEFAULT_LOCALE, LOCALE_DEFAULT)
self.source_locale = self.get_item(KEY_SOURCE_LOCALE, self.default_locale)
self._expand_components()
def _expand_locales(self, locales_def_array, template):
locales = {}
for one in locales_def_array:
locale_def = copy.deepcopy(one)
locales[locale_def.get(KEY_LANG_TAG)] = locale_def
if KEY_LOCAL_PATH not in locale_def and template:
locale_def[KEY_LOCAL_PATH] = copy.deepcopy(template.get(KEY_LOCAL_PATH))
return locales
def _expand_components(self):
self.components = None
components = self.config_data.get(KEY_COMPONENTS)
if not components:
return
expand = {}
self.components = {}
for component in components:
if KEY_LOCALES in component:
component[KEY_LOCALES] = self._expand_locales(component[KEY_LOCALES], None)
self.components[component.get(KEY_COMPONENT_TAG)] = copy.deepcopy(component)
continue
template_name = component.get(KEY_TEMPLATE)
if not template_name:
template_name = KEY_COMPONENT_TEMPLATE
if template_name not in expand:
t = self.config_data.get(template_name)
refer_name = t.get(KEY_LOCALES_REFER)
refer = self.config_data.get(refer_name)
if not refer:
continue
expand[template_name] = self._expand_locales(refer, t)
component[KEY_LOCALES] = expand[template_name]
self.components[component.get(KEY_COMPONENT_TAG)] = copy.deepcopy(component)
def get_config_data(self):
# method of Config
return self.config_data
def get_info(self):
# method of Config
info = {'product': self.product, 'version': self.version,
'remote': self.remote_url, 'local': self.local_url,
'source_locale': self.source_locale, 'default_locale': self.default_locale}
return info
def extract_list(self, key, key_name, key_refer, refer):
_dict = {}
_define = self.config_data.get(key)
if not _define:
return None
for one in _define:
dup = copy.deepcopy(one)
del dup[key_name]
_dict[one[key_name]] = dup
if key_refer not in dup and refer:
dup[key_refer] = copy.deepcopy(refer)
return _dict
def get_item(self, key, default_value):
value = self.config_data.get(key)
if value is None:
value = default_value
return value
def get_path(self, key):
path = self.config_data.get(key)
if path:
if path.startswith('./') or path.startswith('../'):
path = os.path.realpath(os.path.join(self.base, path))
return path
class SingletonApi:
VIP_PATH_HEAD = '/i18n/api/v2/translation/products/{0}/versions/{1}/'
VIP_PARAMETER = 'pseudo=false&machineTranslation=false&checkTranslationStatus=false'
VIP_GET_COMPONENT = 'locales/{0}/components/{1}?'
def __init__(self, release_obj):
self.rel = release_obj
self.cfg = release_obj.cfg
self.addr = self.cfg.remote_url
def get_component_api(self, component, locale):
head = self.VIP_PATH_HEAD.format(self.cfg.product, self.cfg.version)
path = self.VIP_GET_COMPONENT.format(locale, component)
return '{0}{1}{2}{3}'.format(self.addr, head, path, self.VIP_PARAMETER)
def get_localelist_api(self):
head = self.VIP_PATH_HEAD.format(self.cfg.product, self.cfg.version)
return '{0}{1}localelist'.format(self.addr, head)
def get_componentlist_api(self):
head = self.VIP_PATH_HEAD.format(self.cfg.product, self.cfg.version)
return '{0}{1}componentlist'.format(self.addr, head)
class SingletonUpdateThread(Thread):
def __init__(self, obj):
Thread.__init__(self)
self.obj = obj
def run(self):
self.obj.get_from_remote()
class SingletonAccessRemoteTask:
def __init__(self, release_obj, obj):
self.rel = release_obj
self.obj = obj
self.last_time = 0
self.querying = False
self.interval = self.rel.interval
def set_retry(self, current):
# try again after try_delay seconds
self.last_time = current - self.interval + self.rel.try_delay
def check(self):
if not self.rel.cfg.remote_url:
return
access_remote = False
if self.interval > 0:
current = time.time()
if current > self.last_time + self.interval:
access_remote = True
else:
if self.last_time == 0:
access_remote = True
if not access_remote:
return
if self.querying:
if self.obj.get_data_count() == 0:
while self.querying:
time.sleep(0.1)
return
self.querying = True
if self.obj.get_data_count() == 0:
self.obj.get_from_remote()
else:
th = SingletonUpdateThread(self.obj)
th.start()
class SingletonComponent:
def __init__(self, release_obj, locale, component, isLocalSource):
self.rel = release_obj
self.locale = locale
self.localeItem = self.rel.bykey.get_locale_item(locale, isLocalSource)
self.componentIndex = self.rel.bykey.get_component_index(component)
self.component = component
self.isLocalSource = isLocalSource
self.countOfMessages = 0
self.etag = None
self.cache_path = None
self.task = None if isLocalSource else SingletonAccessRemoteTask(release_obj, self)
if self.task and self.rel.cache_path:
self.cache_path = os.path.join(self.rel.cache_path, component, 'messages_{0}.json'.format(locale))
self.rel.log('--- cache file --- {0} ---'.format(self.cache_path))
if os.path.exists(self.cache_path):
dt = FileUtil.read_json_file(self.cache_path)
if KEY_MESSAGES in dt:
self.task.last_time = os.path.getmtime(self.cache_path)
self.set_messages(dt[KEY_MESSAGES])
def set_messages(self, messages):
for key in messages:
text = messages[key]
self.rel.bykey.set_string(key, self, self.componentIndex, self.localeItem, text)
self.countOfMessages = len(messages)
def get_messages(self):
return self.rel.bykey.get_messages(self.componentIndex, self.localeItem)
def get_message(self, key):
return self.rel.bykey.get_string(key, self.componentIndex, self.localeItem)
def is_messages_same(self, messages):
for key in messages:
text = messages[key]
message = self.rel.bykey.get_string(key, self.componentIndex, self.localeItem)
if message != text:
return False
return True
def get_from_remote(self):
current = time.time()
try:
# get messages
addr = self.rel.api.get_component_api(self.component, self.locale)
headers = {}
if self.etag:
headers[HEADER_REQUEST_ETAG] = self.etag
code, dt = NetUtil.http_get(addr, headers)
if code == 200 and ClientUtil.check_response_valid(dt):
self.etag, interval = NetUtil.get_etag_maxage(dt.get(KEY_HEADERS))
if interval:
self.task.interval = interval
messages = dt[KEY_RESULT][KEY_DATA][KEY_MESSAGES]
if self.cache_path:
if os.path.exists(self.cache_path) and self.is_messages_same(messages):
os.utime(self.cache_path, (current, current))
else:
self.rel.log('--- save --- {0} ---'.format(self.cache_path))
FileUtil.save_json_file(self.cache_path, dt[KEY_RESULT][KEY_DATA])
self.set_messages(messages)
self.task.last_time = current
elif code == 304:
self.task.last_time = current
else:
self.task.set_retry(current)
except SgtnException as e:
self.task.set_retry(current)
self.task.querying = False
def get_data_count(self):
return self.countOfMessages
class SingletonUseLocale:
def __init__(self, singletonLocale, sourceLocale, isLocalSource, bykey):
self.singletonLocale = singletonLocale
self.locale = self.singletonLocale.get_original_locale()
self.isLocalSource = isLocalSource
singletonSourceLocale = SingletonLocaleUtil.get_singleton_locale(sourceLocale)
self.isSourceLocale = self.locale in singletonSourceLocale.get_near_locale_list()
self.localeItem = bykey.get_locale_item(self.locale, True) if isLocalSource and bykey else None
self.components = {}
class SingletonReleaseBase:
def __init__(self, cfg):
self.cfg = cfg
self.cache_path = None
self.scope = None
self.logger = None
self.interval = 0
self.try_delay = 0
self.detach = False
self.locale_list = []
self.component_list = []
self.remote_pool = {}
self.source_pool = {}
self.local_handled = {}
self.component_handled = {}
if not cfg:
return
if cfg.log_path:
log_file = os.path.join(cfg.log_path, '{0}_{1}.log'.format(self.cfg.product, self.cfg.version))
self.init_logger(log_file)
if cfg.cache_path:
self.cache_path = os.path.join(cfg.cache_path, self.cfg.product, self.cfg.version)
self.log('--- cache path --- {0} ---'.format(self.cache_path))
self.interval = cfg.cache_expired_time
self.try_delay = cfg.try_delay
self.task = SingletonAccessRemoteTask(self, self)
self.get_scope()
self.remote_default_locale = self.get_locale_supported(self.cfg.default_locale)
self.remote_source_locale = self.get_locale_supported(self.cfg.source_locale)
self.isDifferent = self.remote_default_locale != self.remote_source_locale
self.bykey = SingletonByKey(self.cfg.source_locale, self.cfg.default_locale, self.isDifferent, self.cfg.cache_type)
self.useSourceLocale = self.get_use_locale(self.cfg.source_locale, True)
self._get_local_resource(self.useSourceLocale, self.cfg.source_locale)
self.useDefaultLocale = None
if self.isDifferent:
self.useDefaultLocale = self.get_use_locale(self.cfg.default_locale, False)
def get_use_locale(self, locale, asSource):
pool = self.source_pool if asSource else self.remote_pool
useLocale = pool.get(locale)
if useLocale is None:
singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
useLocale = singletonLocale.find_item(pool, 1)
if useLocale is None:
useLocale = SingletonUseLocale(singletonLocale, self.cfg.source_locale, asSource, self.bykey)
for one in useLocale.singletonLocale.get_near_locale_list():
if one not in pool:
pool[one] = useLocale
return useLocale
def get_scope(self):
self.api = SingletonApi(self)
if self.cache_path:
self.locale_list = FileUtil.read_json_file(os.path.join(self.cache_path, 'locale_list.json'))
self.component_list = FileUtil.read_json_file(os.path.join(self.cache_path, 'component_list.json'))
if not self.cfg.remote_url:
return
if not self.locale_list:
self.get_from_remote()
else:
th = SingletonUpdateThread(self)
th.start()
def get_from_remote(self):
self.task.last_time = time.time()
try:
# get locale list
scope = self._get_scope_item(self.api.get_localelist_api(), KEY_LOCALES, 'locale_list.json')
if scope:
self.locale_list = scope
# get component list
scope = self._get_scope_item(self.api.get_componentlist_api(), KEY_COMPONENTS, 'component_list.json')
if scope:
self.component_list = scope
except SgtnException as e:
pass
self.task.querying = False
def get_data_count(self):
if not self.locale_list or not self.component_list:
return 0
return len(self.locale_list) + len(self.component_list)
def init_logger(self, log_file):
self.logger = SysUtil.init_logger(log_file, 'sgtn_{0}_{1}'.format(self.cfg.product, self.cfg.version))
self.log('--- release --- {0} --- {1} --- {2} ---'.format(self.cfg.product, self.cfg.version, time.time()))
def log(self, text, log_type=LOG_TYPE_INFO):
SysUtil.log(self.logger, text, log_type)
def _load_one_local(self, component, locale, path_define):
if not path_define:
return None
for i, v in enumerate(path_define):
path = v.replace('$COMPONENT', component).replace('$LOCALE', locale)
path_define[i] = os.path.join(self.cfg.local_url, path)
return ClientUtil.read_resource_files(self.cfg.local_type, path_define)
def _get_scope_item(self, addr, key, keep_name):
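        # Fetch a locale or component list from the service; honor the response's
        # max-age as the polling interval and, when a cache path is configured,
        # save the list to disk.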
code, dt = NetUtil.http_get(addr, None)
if code == 200 and ClientUtil.check_response_valid(dt):
_, interval = NetUtil.get_etag_maxage(dt.get(KEY_HEADERS))
if interval:
self.task.interval = interval
scope = dt[KEY_RESULT][KEY_DATA][key]
if scope and self.cache_path:
FileUtil.save_json_file(os.path.join(self.cache_path, keep_name), scope)
return scope
return None
def _extract_info_from_dir(self, root):
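        # Build the component/locale configuration by scanning the local resource
        # directory for files named messages_<locale>.<ext>; a plain
        # messages.<ext> file maps to the source locale.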
if self.cfg.local_type != LOCAL_TYPE_FILE:
return
components = {}
dir_list, _ = FileUtil.get_dir_info(root)
for component in dir_list:
components[component] = {}
component_obj = components[component]
component_obj[KEY_LOCALES] = {}
locales_cfg = component_obj.get(KEY_LOCALES)
component_path = os.path.join(self.cfg.local_url, component)
_, file_list = FileUtil.get_dir_info(component_path)
for res_file in file_list:
parts = re.split(r"messages(.*)\.", res_file)
if len(parts) == 3:
if parts[1].startswith('_'):
locale = parts[1][1:]
elif parts[1] == '':
locale = self.cfg.source_locale
locales_cfg[locale] = {KEY_LOCAL_PATH: [os.path.join(component, res_file)]}
return components
def _get_local_resource(self, useLocale, locale):
if useLocale is None:
return
locale_item = useLocale.components
if not self.cfg.local_url:
return
if not self.cfg.components:
self.cfg.components = self._extract_info_from_dir(self.cfg.local_url)
if not self.cfg.components:
return
for component in self.cfg.components:
locales_cfg = self.cfg.components[component].get(KEY_LOCALES)
locale_define = None
if locales_cfg:
singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
locale_define = singletonLocale.find_item(locales_cfg, 0)
combineKey = locale + '_!_' + component
if locale_define and combineKey not in self.local_handled:
path_define = locale_define.get(KEY_LOCAL_PATH)
map = self._load_one_local(component, locale, path_define)
component_obj = SingletonComponent(self, locale, component, useLocale.isLocalSource)
component_obj.set_messages(map)
locale_item[component] = component_obj
self.local_handled[combineKey] = True
def _get_remote_resource(self, locale, component):
if not self.locale_list or not self.component_list:
return None
singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
if not singletonLocale.is_in_locale_list(self.locale_list):
return None
if component not in self.component_list:
return None
components = self.get_use_locale(locale, False).components
component_obj = components.get(component)
if component_obj is None:
self.log('--- component --- {0} ---'.format(component))
component_obj = SingletonComponent(self, locale, component, False)
components[component] = component_obj
component_obj.task.check()
return component_obj
def _get_component(self, locale, component):
component_remote = self._get_remote_resource(locale, component)
if component_remote:
return component_remote
component_obj = None
if self.cfg.local_url:
useLocale = self.get_use_locale(locale, False)
combineKey = locale + '_!_' + component
if combineKey not in self.local_handled:
self._get_local_resource(useLocale, locale)
self.local_handled[combineKey] = True
if useLocale:
component_obj = useLocale.components.get(component)
if component_obj is None and useLocale.isSourceLocale:
component_obj = self.useSourceLocale.components.get(component)
return component_obj
def _get_message(self, component, key, source, locale):
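        # Resolve a translation through the by-key cache: on first use of a
        # locale/component pair, load the source, requested and (if different)
        # default locale components, then look up the key with fallback.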
message = source if source is not None else key
if not key or not locale:
return message
if not self.bykey._onlyByKey and not component:
return message
self.task.check()
componentIndex = self.bykey.get_component_index(component)
if componentIndex >= 0:
combineKey = locale + '_!_' + component
if combineKey not in self.component_handled:
self._get_component(self.remote_source_locale, component)
componentObj = self._get_component(locale, component)
if self.isDifferent:
self._get_component(self.remote_default_locale, component)
if componentObj:
self.component_handled[combineKey] = True
localeItem = self.bykey.get_locale_item(locale, False)
message = self.bykey.get_string(key, componentIndex, localeItem, True)
return message
class SingletonRelease(SingletonReleaseBase, Release, Translation):
def get_config(self):
# method of Release
return self.cfg
def get_translation(self):
# method of Release
return self
def get_locale_strings(self, locale, asSource):
# method of Translation
collect = {}
useLocale = self.get_use_locale(locale, asSource)
if useLocale and useLocale.components:
components = useLocale.components
for component in components:
collect[component] = components[component].get_messages()
return collect
def get_source(self, component, key, sourceInCode):
componentIndex = self.bykey.get_component_index(component)
source = self.bykey.get_string(key, componentIndex, self.useSourceLocale.localeItem, False)
if source is not None:
return source
source = self._get_message(component, key, sourceInCode, self.cfg.source_locale)
return source
def get_raw(self, component, key, sourceInCode, locale, items):
useLocale = self.get_use_locale(locale, False)
if useLocale.isSourceLocale:
if sourceInCode is not None:
return sourceInCode
return self.get_source(component, key, sourceInCode)
source = self.get_source(component, key, sourceInCode)
if sourceInCode is not None and source is not None and source != sourceInCode:
return sourceInCode
return self._get_message(component, key, source, locale)
def get_string(self, component, key, **kwargs):
# method of Translation
sourceInCode = kwargs.get(KEY_SOURCE) if kwargs else None
locale = kwargs.get(KEY_LOCALE) if kwargs else None
items = kwargs.get(KEY_ITEMS) if kwargs else None
if not locale:
locale = SingletonClientManager().get_current_locale()
text = self.get_raw(component, key, sourceInCode, locale, items)
if text and items:
if isinstance(items, list):
text = self.format_by_array(text, items)
elif isinstance(items, dict):
text = self.format_by_map(text, items)
if text is None:
text = key
return text
def format_by_array(self, text, array):
return text.format(*array)
def format_by_map(self, text, map):
return text.format(**map)
def get_locale_supported(self, locale):
# method of Translation
return SysUtil.get_fallback_locale(locale)
class SingletonClientManager(object):
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = object.__new__(cls)
cls._instance.init()
return cls._instance
def init(self):
self._products = {}
def add_config_file(self, config_file, replaceMap=None):
config_text = FileUtil.read_text_file(config_file)
if replaceMap:
for key in replaceMap:
config_text = config_text.replace(key, replaceMap[key])
config_data = FileUtil.parse_datatree(config_text)
base_path = os.path.dirname(os.path.realpath(config_file))
cfg = self.add_config(base_path, config_data)
return cfg
def add_config(self, base_path, config_data):
if not config_data:
return
cfg = SingletonConfig(base_path, config_data)
release_obj = self.get_release(cfg.product, cfg.version)
if release_obj is None:
self.create_release(cfg)
return cfg
def get_release(self, product, version):
if not product or not version:
return None
releases = self._products.get(product)
if releases is None:
return None
return releases.get(version)
def create_release(self, cfg):
if not cfg or not cfg.product or not cfg.version:
return
releases = self._products.get(cfg.product)
if releases is None:
self._products[cfg.product] = {}
releases = self._products.get(cfg.product)
release_obj = releases.get(cfg.version)
if release_obj is None:
release_obj = SingletonRelease(cfg)
releases[cfg.version] = release_obj
def set_current_locale(self, locale):
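        # Walk up (at most 10) caller stack frames and record the locale in each
        # frame's locals, so get_current_locale() can recover it later within
        # the same call chain.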
current = sys._getframe().f_back.f_back
for i in range(10):
if not hasattr(current, 'f_locals'):
break
locals = current.f_locals
locals['_singleton_locale_'] = locale
if not hasattr(current, 'f_back'):
break
current = current.f_back
def get_current_locale(self):
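        # Walk up the caller stack frames looking for the locale recorded by
        # set_current_locale(); fall back to LOCALE_DEFAULT if none is found.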
current = sys._getframe().f_back.f_back
for i in range(10):
if not hasattr(current, 'f_locals'):
break
locals = current.f_locals
if '_singleton_locale_' in locals:
return locals['_singleton_locale_']
if not hasattr(current, 'f_back'):
break
current = current.f_back
return LOCALE_DEFAULT | zzz001 | /zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_client.py | sgtn_client.py |
from collections import OrderedDict
from sgtn_py_base import pybase
MAX_LINE_BUFFER = 1024
class LineReader:
def __init__(self, inCharBuf):
self.lineBuf = [None] * MAX_LINE_BUFFER
self.inLimit = 0
self.inOff = 0
self.inCharBuf = inCharBuf
if self.inCharBuf:
self.inLimit = len(self.inCharBuf)
def read_line(self):
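        # Read one logical line into self.lineBuf, java.util.Properties style:
        # skip comment lines and leading whitespace, and join lines continued
        # with a trailing backslash; return the line length, or -1 at end of
        # input.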
length = 0
c = 0
skipWhiteSpace = True
isCommentLine = False
isNewLine = True
appendedLineBegin = False
precedingBackslash = False
skipLF = False
while True:
if self.inOff >= self.inLimit:
if length == 0 or isCommentLine:
return -1
if precedingBackslash:
length -= 1
return length
            # The line below is equivalent to calling an ISO8859-1 decoder.
c = self.inCharBuf[self.inOff]
self.inOff += 1
if skipLF:
skipLF = False
if c == '\n':
continue
if skipWhiteSpace:
if c == ' ' or c == '\t' or c == '\f':
continue
if not appendedLineBegin and (c == '\r' or c == '\n'):
continue
skipWhiteSpace = False
appendedLineBegin = False
if isNewLine:
isNewLine = False
if c == '#' or c == '!':
isCommentLine = True
continue
if c != '\n' and c != '\r':
self.lineBuf[length] = c
length += 1
if length == len(self.lineBuf):
buf = [None] * length
self.lineBuf.extend(buf)
                # flip the preceding backslash flag
if c == '\\':
precedingBackslash = not precedingBackslash
else:
precedingBackslash = False
else:
#reached end of line
if isCommentLine or length == 0:
isCommentLine = False
isNewLine = True
skipWhiteSpace = True
length = 0
continue
if self.inOff >= self.inLimit:
if precedingBackslash:
length -= 1
return length
if precedingBackslash:
length -= 1
                    # skip the leading whitespace characters in the following line
skipWhiteSpace = True
appendedLineBegin = True
precedingBackslash = False
if c == '\r':
skipLF = True
else:
return length
class Properties:
def __init__(self):
self.kvTable = None
def parse(self, text):
self.kvTable = OrderedDict()
reader = LineReader(text)
self.load(reader)
return self.kvTable
def put(self, key, value):
oldValue = self.kvTable.get(key)
self.kvTable[key] = value
return oldValue
def load(self, lr):
convtBuf = [None] * MAX_LINE_BUFFER
while True:
limit = lr.read_line()
if limit < 0:
break
c = 0
keyLen = 0
valueStart = limit
hasSep = False
precedingBackslash = False
while True:
if keyLen >= limit:
break
c = lr.lineBuf[keyLen]
                # need to check whether the separator is escaped
if (c == '=' or c == ':') and not precedingBackslash:
valueStart = keyLen + 1
hasSep = True
break
elif (c == ' ' or c == '\t' or c == '\f') and not precedingBackslash:
valueStart = keyLen + 1
break
if c == '\\':
precedingBackslash = not precedingBackslash
else:
precedingBackslash = False
keyLen += 1
while True:
if valueStart >= limit:
break
c = lr.lineBuf[valueStart]
if c != ' ' and c != '\t' and c != '\f':
if not hasSep and (c == '=' or c == ':'):
hasSep = True
else:
break
valueStart += 1
key = self.load_convert(lr.lineBuf, 0, keyLen, convtBuf)
value = self.load_convert(lr.lineBuf, valueStart, limit-valueStart, convtBuf)
self.put(key, value)
def load_convert(self, inText, off, length, convtBuf):
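        # Decode escape sequences in a key or value: \uXXXX unicode escapes plus
        # \t, \r, \n and \f; any other escaped character is kept as-is.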
if len(convtBuf) < length:
newLen = length * 2
convtBuf = [None] * newLen
outText = convtBuf
outLen = 0
end = off + length
while True:
if off >= end:
break
aChar = inText[off]
off += 1
if aChar == '\\':
aChar = inText[off]
off += 1
if aChar == 'u':
                    # Read the four hex digits of the unicode escape after \u
value = 0
for i in range(4):
aChar = inText[off]
off += 1
if aChar >= '0' and aChar <= '9':
value = (value << 4) + ord(aChar) - ord('0')
elif aChar >= 'a' and aChar <= 'f':
value = (value << 4) + 10 + ord(aChar) - ord('a')
elif aChar >= 'A' and aChar <= 'F':
value = (value << 4) + 10 + ord(aChar) - ord('A')
else:
return None
outText[outLen] = pybase.int_to_unicode(value)
outLen += 1
else:
if aChar == 't':
aChar = '\t'
elif aChar == 'r':
aChar = '\r'
elif aChar == 'n':
aChar = '\n'
elif aChar == 'f':
aChar = '\f'
outText[outLen] = aChar
outLen += 1
else:
outText[outLen] = aChar
outLen += 1
return ''.join(outText[:outLen]) | zzz001 | /zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_properties.py | sgtn_properties.py |
import os
import sys
import json
import re
import logging
from collections import OrderedDict
from sgtn_py_base import pybase, SgtnException
from sgtn_debug import SgtnDebug
import ssl
if hasattr(ssl, '_create_unverified_context'): # for python 2.7
ssl._create_default_https_context = ssl._create_unverified_context
PY_VER = sys.version_info.major
UTF8 = 'utf-8'
httplib = pybase.get_httplib()
KEY_RESULT = 'result'
KEY_HEADERS = 'headers'
KEY_ERROR = 'error'
LOG_TYPE_INFO = 'info'
LOG_TYPE_ERROR = 'error'
# below keys are in lower case
LOCALE_MAP = {
'zh-hant': 'zh-Hant',
'zh-tw': 'zh-Hant',
'zh-hans': 'zh-Hans',
'zh-cn': 'zh-Hans'
}
class FileUtil:
LOG_INTERNAL = ''
@classmethod
def read_text_file(cls, file_name):
SgtnDebug.log_text('util', 'read file {0} / exist: {1}'.format(
file_name, os.path.exists(file_name)))
if os.path.exists(file_name) and os.path.isfile(file_name):
f = open(file_name, 'rb')
file_data = f.read()
f.close()
try:
file_data = file_data.decode(UTF8)
return file_data
except UnicodeDecodeError as e:
return None
return None
@classmethod
def parse_json_from_text(cls, text):
try:
dict_data = json.loads(text, object_pairs_hook=OrderedDict)
return dict_data
except json.decoder.JSONDecodeError as e:
raise SgtnException(str(e))
@classmethod
def parse_json(cls, text):
if text:
try:
return cls.parse_json_from_text(text)
except SgtnException as e:
return None
return None
@classmethod
def parse_yaml_from_text(cls, text):
try:
import yaml
dict_data = yaml.load(text, Loader=yaml.FullLoader)
return dict_data
except yaml.YAMLError as e:
raise SgtnException(str(e))
@classmethod
def parse_yaml(cls, text):
if text:
try:
return cls.parse_yaml_from_text(text)
except SgtnException as e:
return None
return None
@classmethod
def parse_datatree(cls, text):
if text:
data = cls.parse_yaml(text)
if data is None:
data = cls.parse_json(text)
return data
return None
@classmethod
def read_json_file(cls, file_name):
file_data = cls.read_text_file(file_name)
return cls.parse_json(file_data)
@classmethod
def read_datatree(cls, file_name):
file_data = cls.read_text_file(file_name)
return cls.parse_datatree(file_data)
@classmethod
def save_json_file(cls, file_name, dict):
dir = os.path.dirname(file_name)
if not os.path.exists(dir):
os.makedirs(dir)
f = pybase.open_file(file_name, 'w')
text = json.dumps(dict, ensure_ascii=False, indent=2)
f.write(text)
f.close()
@classmethod
def get_dir_info(cls, dir_name):
dir_list = []
file_list = []
try:
ls = os.listdir(dir_name)
except IOError as e:
pass
else:
for fn in ls:
temp = os.path.join(dir_name, fn)
if os.path.isdir(temp):
dir_list.append(fn)
else:
file_list.append(fn)
return dir_list, file_list
class NetUtil:
simulate_data = None
record_data = {'enable': False, 'records': {}}
@classmethod
def _get_data(cls, url, request_headers):
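        # Perform the HTTP GET (with optional request headers) and return
        # (text, headers); when simulate_data is set, serve recorded responses
        # instead, and optionally record live responses for later simulation.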
if not cls.simulate_data:
req = httplib.Request(url)
if request_headers:
for key in request_headers:
req.add_header(key, request_headers[key])
try:
res_data = httplib.urlopen(req)
except IOError as e:
raise SgtnException(str(e))
headers = {}
for h in res_data.headers:
headers[h.lower()] = res_data.headers[h].lower()
try:
result = res_data.read()
except IOError as e:
raise SgtnException(str(e))
try:
text = result.decode(UTF8)
except UnicodeDecodeError as e:
raise SgtnException(str(e))
if cls.record_data['enable']:
header_part = json.dumps(request_headers) if request_headers else request_headers
key = '{0}<<headers>>{1}'.format(url, header_part) if header_part else url
cls.record_data['records'][key] = {'text': text, 'headers': headers}
return text, headers
else:
header_part = json.dumps(request_headers) if request_headers else request_headers
key = '{0}<<headers>>{1}'.format(url, header_part) if header_part else url
kept = cls.simulate_data.get(key)
if kept:
if 'code' in kept:
if kept['code'] == 304:
raise SgtnException('Error 304:')
return kept['text'], kept['headers']
return None, None
@classmethod
def http_get_text(cls, url):
text = None
try:
text, _ = cls._get_data(url, None)
except SgtnException as e:
pass
return text
@classmethod
def http_get(cls, url, request_headers):
ret = {}
code = 400
try:
text, headers = cls._get_data(url, request_headers)
ret[KEY_RESULT] = FileUtil.parse_json_from_text(text)
ret[KEY_HEADERS] = headers
code = 200
except SgtnException as e:
err_msg = str(e)
parts = re.split("Error ([0-9]*):", err_msg)
if len(parts) > 1:
code = int(parts[1])
if code != 304:
ret[KEY_ERROR] = 'HTTP ERROR: {0}'.format(str(e))
return code, ret
@classmethod
def get_etag_maxage(cls, headers):
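        # Return the (etag, max-age) pair parsed from the response headers;
        # either value may be None.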
if headers is None:
return None, None
etag = headers.get('etag')
text = headers.get('cache-control')
if text is None:
return etag, None
parts = re.split("max\\-age[ ]*\\=[ ]*([0-9]*)[ ]*", text)
if len(parts) < 2:
return etag, None
return etag, float(parts[1])
class SysUtil:
@classmethod
def init_logger(cls, log_file, log_name):
handler = logging.FileHandler(log_file)
formatter = logging.Formatter('%(asctime)s %(message)s')
handler.setFormatter(formatter)
logger = logging.getLogger(log_name)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
cls.log(logger, '')
cls.log(logger, '--- start --- python --- {0}'.format(sys.version.split('\n')[0]))
return logger
@classmethod
def log(cls, logger, text, log_type=LOG_TYPE_INFO):
if logger:
if log_type == LOG_TYPE_INFO:
logger.info(text)
return
elif log_type == LOG_TYPE_ERROR:
logger.error(text)
return
print(text)
@classmethod
def get_fallback_locale(cls, locale):
parts = re.split(r"[\-_]", locale)
parts[0] = parts[0].lower()
if len(parts) > 1:
parts[1] = parts[1].upper()
locale = '-'.join(parts)
fallback = LOCALE_MAP.get(locale.lower())
if fallback:
return fallback
return parts[0] | zzz001 | /zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_util.py | sgtn_util.py |
from collections import OrderedDict
import threading
lock = threading.Lock()
from sgtn_locale import SingletonLocaleUtil
_indexLocaleItem = 0
class SingletonByKeyItem(object):
def __init__(self, componentIndex, itemIndex):
self._componentIndex = componentIndex
self._pageIndex = itemIndex // SingletonByKey.PAGE_MAX_SIZE
self._indexInPage = itemIndex % SingletonByKey.PAGE_MAX_SIZE
self._sourceStatus = 0x01
self._next = None
class SingletonByKeyTable(object):
def __init__(self, max):
self._max = max
self._table = [None] * max
def get_page(self, id):
return self._table[id]
def new_page(self, id):
array = [None] * self._max
self._table[id] = array
return array
def get_item(self, pageIndex, indexInPage):
array = self.get_page(pageIndex)
if array is None:
return None
return array[indexInPage]
def set_item(self, pageIndex, indexInPage, item):
array = self.get_page(pageIndex)
if array is None:
array = self.new_page(pageIndex)
array[indexInPage] = item
def get_item_by_one_index(self, index):
pageIndex = index // self._max
indexInPage = index % self._max
return self.get_item(pageIndex, indexInPage)
def set_item_by_one_index(self, index, item):
pageIndex = index // self._max
indexInPage = index % self._max
self.set_item(pageIndex, indexInPage, item)
class SingletonByKeyComponents(object):
def __init__(self):
self._count = 0
self._componentTable = []
self._componentIndexTable = {}
def get_id(self, component):
if not component:
return -1
componentIndex = self._componentIndexTable.get(component)
if componentIndex is not None:
return componentIndex
self._componentTable.append(component)
self._componentIndexTable[component] = self._count
self._count += 1
return self._count - 1
def get_name(self, id):
if id < 0 or id >= self._count:
return None
return self._componentTable[id]
class SingletonByKeyLocale(object):
def __init__(self, bykey, locale, asSource):
global _indexLocaleItem
_indexLocaleItem += 1
self._indexLocaleItem = _indexLocaleItem
self._bykey = bykey
self._locale = locale
self._singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
self._asSource = asSource
self._isSourceLocale = self._singletonLocale.compare(bykey._singletonLocaleSource)
self._messages = SingletonByKeyTable(SingletonByKey.PAGE_MAX_SIZE)
self._components = SingletonByKeyTable(SingletonByKey.COMPONENT_PAGE_MAX_SIZE)
def check_task(self, componentIndex, needCheck):
if componentIndex >= 0 and needCheck:
componentObj = self._components.get_item_by_one_index(componentIndex)
if componentObj is not None and componentObj.task is not None:
componentObj.task.check()
def get_message(self, componentIndex, pageIndex, indexInPage, needCheck=True):
self.check_task(componentIndex, needCheck)
return self._messages.get_item(pageIndex, indexInPage)
def set_message(self, message, componentObject, componentIndex, pageIndex, indexInPage):
if componentObject:
self._components.set_item_by_one_index(componentIndex, componentObject)
self._messages.set_item(pageIndex, indexInPage, message)
return True
class SingletonLookup(object):
def __init__(self, key, componentIndex, message):
self._key = key
self._componentIndex = componentIndex
self._message = message
self._add = 0
self._aboveItem = None
self._currentItem = None
class SingletonByKey(object):
PAGE_MAX_SIZE = 1024
COMPONENT_PAGE_MAX_SIZE = 128
def __init__(self, localeSource, localeDefault, isDifferent, cacheType):
self._itemCount = 0
self._keyAttrTable = {}
self._items = SingletonByKeyTable(SingletonByKey.PAGE_MAX_SIZE)
self._componentTable = SingletonByKeyComponents()
self._singletonLocaleSource = SingletonLocaleUtil.get_singleton_locale(localeSource)
self._sources = {}
self._locales = {}
self._onlyByKey = (cacheType == 'by_key')
self._isDifferent = isDifferent
self._sourceLocal = None
self._sourceRemote = None
self._defaultLocale = localeDefault
self._defaultRemote = None
def set_item(self, item, pageIndex, indexInPage):
self._items.set_item(pageIndex, indexInPage, item)
return True
def get_and_add_itemcount(self):
count = self._itemCount
self._itemCount += 1
return count
def get_locale_item(self, locale, asSource):
table = self._sources if asSource else self._locales
item = table.get(locale)
if item is None:
singletonLocale = SingletonLocaleUtil.get_singleton_locale(locale)
for oneLocale in table:
oneSingletonLocale = SingletonLocaleUtil.get_singleton_locale(oneLocale)
if singletonLocale.compare(oneSingletonLocale):
item = table[oneLocale]
break
if item is None:
item = SingletonByKeyLocale(self, locale, asSource)
table[locale] = item
return item
def get_component_index(self, component):
return self._componentTable.get_id(component)
def get_string(self, key, componentIndex, localeItem, needFallback=False):
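        # Look up the message for a key in the given locale item; with
        # needFallback, fall back to the default remote locale, then the local
        # or remote source message, and finally the key itself.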
if componentIndex < 0 and not self._onlyByKey:
return None
item = self._keyAttrTable.get(key)
if componentIndex >= 0:
while item:
if item._componentIndex == componentIndex:
break
item = item._next
if item is None:
localeItem.check_task(componentIndex, needFallback)
return None
if not needFallback:
message = localeItem.get_message(componentIndex, item._pageIndex, item._indexInPage, False)
return message
message = None
if item._sourceStatus & 0x01 == 0x01:
message = localeItem.get_message(componentIndex, item._pageIndex, item._indexInPage)
if message is not None:
return message
if self._isDifferent:
if not self._defaultRemote:
self._defaultRemote = self.get_locale_item(self._defaultLocale, False)
message = self._defaultRemote.get_message(componentIndex, item._pageIndex, item._indexInPage)
if message is None:
if item._sourceStatus & 0x04 == 0x04:
message = self._sourceLocal.get_message(componentIndex, item._pageIndex, item._indexInPage)
elif item._sourceStatus & 0x03 == 0x03:
message = self._sourceRemote.get_message(componentIndex, item._pageIndex, item._indexInPage)
if message is None:
message = key
return message
def new_key_item(self, componentIndex):
itemIndex = self.get_and_add_itemcount()
item = SingletonByKeyItem(componentIndex, itemIndex)
self.set_item(item, item._pageIndex, item._indexInPage)
return item
def _find_or_add(self, lookup):
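        # Each key maps to a linked list of items, one per component; find the
        # item for this component, or create a new one and note in the lookup
        # how it should be attached.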
item = self._keyAttrTable.get(lookup._key)
if item is None: # This is new
lookup._currentItem = self.new_key_item(lookup._componentIndex)
lookup._add = 1
return
while item:
if item._componentIndex == lookup._componentIndex: # Found
lookup._currentItem = item
return
lookup._aboveItem = item
item = item._next
lookup._currentItem = self.new_key_item(lookup._componentIndex)
lookup._add = 2
def do_set_string(self, key, componentObject, componentIndex, localeItem, message):
lookup = SingletonLookup(key, componentIndex, message)
self._find_or_add(lookup)
item = lookup._currentItem
if item is None:
return False
done = localeItem.set_message(message, componentObject, componentIndex, item._pageIndex, item._indexInPage)
if done and localeItem._isSourceLocale:
status = item._sourceStatus
if localeItem._asSource:
self._sourceLocal = localeItem
status |= 0x04
elif localeItem._isSourceLocale:
self._sourceRemote = localeItem
status |= 0x02
if (status & 0x06) != 0x06:
status |= 0x01
else:
localSource = self._sourceLocal.get_message(componentIndex, item._pageIndex, item._indexInPage, False)
remoteSource = self._sourceRemote.get_message(componentIndex, item._pageIndex, item._indexInPage, False)
if localSource == remoteSource:
status |= 0x01
else:
status &= 0x06
item._sourceStatus = status
        # Finally, the item is added to the table only after it has been fully
        # prepared, to keep concurrent reads correct.
if lookup._add == 1:
self._keyAttrTable[key] = lookup._currentItem
elif lookup._add == 2:
lookup._aboveItem._next = lookup._currentItem
return done
def set_string(self, key, componentObject, componentIndex, localeItem, message):
if message is None or key is None or localeItem is None:
return False
text = self.get_string(key, componentIndex, localeItem)
if message != text:
with lock:
text = self.get_string(key, componentIndex, localeItem)
if message != text:
return self.do_set_string(key, componentObject, componentIndex, localeItem, message)
return False
def get_key_item(self, pageIndex, indexInPage):
array = self._items.get_page(pageIndex)
if array is None:
return None
return array[indexInPage]
def get_messages(self, componentIndex, localeItem):
messages = OrderedDict()
if componentIndex >= 0 and localeItem:
pages = {}
for i in range(SingletonByKey.PAGE_MAX_SIZE):
array = localeItem._messages.get_page(i)
if array is None:
continue
for k in range(SingletonByKey.PAGE_MAX_SIZE):
text = array[k]
if text is not None:
item = self.get_key_item(i, k)
if item:
if i not in pages:
pages[i] = {}
pages[i][k] = ''
for key in self._keyAttrTable:
item = self._keyAttrTable.get(key)
if item._pageIndex in pages:
array = pages[item._pageIndex]
if item._indexInPage in array:
messages[key] = self.get_string(key, componentIndex, localeItem)
return messages | zzz001 | /zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_bykey.py | sgtn_bykey.py |
import re
class SingletonLocale(object):
def __init__(self, locale):
self._localeList = [locale]
def get_near_locale_list(self):
return self._localeList
def add_near_locale(self, locale):
if locale in self._localeList:
return False
self._localeList.append(locale)
return True
def get_count(self):
return len(self._localeList)
def get_near_locale(self, index):
if index < 0 or index >= self.get_count():
return None
return self._localeList[index]
def get_original_locale(self):
return self.get_near_locale(0)
def compare(self, singletonLocale):
if singletonLocale is None:
return False
return self.is_in_locale_list(singletonLocale.get_near_locale_list())
def is_in_locale_list(self, checkList):
if checkList is None:
return False
for i in range(self.get_count()):
if self.get_near_locale(i) in checkList:
return True
return False
def find_item(self, items, start):
for i in range(start, self.get_count()):
nearLocale = self.get_near_locale(i)
item = items.get(nearLocale)
if item:
return item
return None
def set_items(self, items, item):
for i in range(self.get_count()):
nearLocale = self.get_near_locale(i)
items[nearLocale] = item
class SingletonLocaleUtil(object):
DEFAULT_LOCALE = "en-US"
FALLBACK = {
'zh-CN': 'zh-Hans',
'zh-TW': 'zh-Hant',
'zh-HANS': 'zh-Hans',
'zh-HANT': 'zh-Hant'
}
LocaleFallbackMap = {}
SystemLocale = None
@classmethod
def get_singleton_locale(cls, locale):
if locale is None:
return cls.get_singleton_locale(SingletonLocaleUtil.DEFAULT_LOCALE)
singletonLocale = cls.LocaleFallbackMap.get(locale.lower())
if singletonLocale:
return singletonLocale
parts = re.split(r'[\_|\-]', locale)
parts[0] = parts[0].lower()
if len(parts) > 1:
parts[1] = parts[1].upper()
original = '-'.join(parts)
singletonLocale = SingletonLocale(original)
fallback = cls.FALLBACK.get(original)
if fallback:
singletonLocale.add_near_locale(fallback)
elif len(parts) > 1:
singletonLocale.add_near_locale(parts[0])
cls.LocaleFallbackMap[locale.lower()] = singletonLocale
return singletonLocale | zzz001 | /zzz001-0.0.4.tar.gz/zzz001-0.0.4/sgtnclient/sgtn_locale.py | sgtn_locale.py |
=============
zzzeeksphinx
=============
This is zzzeek's own Sphinx layout, used by SQLAlchemy.
This layout is first and foremost pulled in for the SQLAlchemy documentation
builds (and possibly other related projects).
.. note:: The stability of zzzeeksphinx is **not** guaranteed and APIs and
behaviors can change at any time. For use in other projects, please fork
and/or adapt any portion of useful code as needed.
Features include:
* Uses Mako templates instead of Jinja, for more programmatic capabilities
inside of templates.
* Layout includes an independently scrollable sidebar
* A unique (to Sphinx) "contextual" sidebar contents that shows the
current page in context with all sibling pages (like that of MySQL's docs).
  This is a form of TOC that Sphinx doesn't typically offer much capability
  for (though it could, with some simple feature additions), but IMO this
  kind of navigation is critical for very large and nested documentation
  sets: the navbar stays relatively small yet provides context as to where
  you are in the docs and what else is locally available.
* Modifications to autodoc which illustrate inherited classes and bases;
  method documentation indicates whether a method is merely inherited from
  the base class or is overridden.
* A "dynamic base" feature that will, under ReadTheDocs, pull in optional
``.mako`` and ``.py`` files from the website of your choice
that will serve as an alternate base template and a source of extra
config setup, respectively, allowing the layout to be integrated into
the layout of an external site when viewing on the web.
* A "viewsource" extension that can provide highlighted sourcecode to any
Python file arbitrarily.
* SQLAlchemy-specific stuff, like the [SQL] popups, the dialect info
directives.
* scss support using pyscss.
Config
======
in conf.py, the extension is::
extensions = [
'zzzeeksphinx',
]
The theme is::
html_theme = 'zzzeeksphinx'
Other configs that SQLAlchemy has set up; these two are probably
needed::
# The short X.Y version.
version = "1.0"
# The full version, including alpha/beta/rc tags.
release = "1.0.0"
release_date = "Not released"
Additional configs for the "dynamic site thing" look like::
site_base = os.environ.get("RTD_SITE_BASE", "http://www.sqlalchemy.org")
site_adapter_template = "docs_adapter.mako"
site_adapter_py = "docs_adapter.py"
Configs which do some last-minute translation of module names
when running autodoc to display API documentation::
autodocmods_convert_modname = {
"sqlalchemy.sql.sqltypes": "sqlalchemy.types",
"sqlalchemy.sql.type_api": "sqlalchemy.types",
"sqlalchemy.sql.schema": "sqlalchemy.schema",
"sqlalchemy.sql.elements": "sqlalchemy.sql.expression",
"sqlalchemy.sql.selectable": "sqlalchemy.sql.expression",
"sqlalchemy.sql.dml": "sqlalchemy.sql.expression",
"sqlalchemy.sql.ddl": "sqlalchemy.schema",
"sqlalchemy.sql.base": "sqlalchemy.sql.expression"
}
autodocmods_convert_modname_w_class = {
("sqlalchemy.engine.interfaces", "Connectable"): "sqlalchemy.engine",
("sqlalchemy.sql.base", "DialectKWArgs"): "sqlalchemy.sql.base",
}
| zzzeeksphinx | /zzzeeksphinx-1.4.0.tar.gz/zzzeeksphinx-1.4.0/README.rst | README.rst |
ZzzFS: dataset management à la ZFS
ZzzFS ("snooze FS") brings a set of ZFS management commands to non-ZFS volumes,
turning any directory on a traditional filesystem into a zpool-like object.
Using only the Python standard library, ZzzFS can be useful to, for example,
test tools that use ZFS functionality on a system lacking real ZFS. Of course,
ZzzFS misses all of the low-level features underpinning true ZFS volumes:
checksumming, copy-on-write, etc.
Note that this is distinct from the ZFS feature allowing a zpool to be created
using a regular file as a vdev. ZzzFS translates commands into move/copy/symlink
operations in the original filesystem; it does not manage blocks in a virtual
disk.
This is a functional work in progress; don't trust any important data to it
just yet. The test suite covers the following features:
* create/destroy/list "filesystems" and "pools"
* clone/promote, send/receive, rollback, diff snapshots
* get/set/inherit attributes
* pool command history
Example usage::
$ zzzpool create mypool /tmp/pool
$ zzzpool list
NAME SIZE ALLOC FREE CAP HEALTH ALTROOT
mypool - - - - ONLINE -
$ zzzfs create mypool/work
$ zzzfs create mypool/play
$ zzzfs snapshot mypool/work@yesterday
$ zzzfs list -t all
NAME USED AVAIL REFER MOUNTPOINT
mypool - - - /private/tmp/pool/mypool
mypool/play - - - /private/tmp/pool/mypool/play
mypool/work - - - /private/tmp/pool/mypool/work
mypool/work@yesterday - - - -
$ zzzfs send mypool/work@yesterday | zzzfs receive mypool/more_work
$ zzzpool history
History for 'mypool':
2015-01-13.22:32:38 zzzpool create mypool /tmp/pool
2015-01-13.22:32:50 zzzfs create mypool/work
2015-01-13.22:32:53 zzzfs create mypool/play
2015-01-13.22:32:56 zzzfs snapshot mypool/work@yesterday
2015-01-13.22:33:48 zzzfs receive mypool/more_work
For more details on real ZFS command usage, see the Oracle Solaris ZFS
Administration Guide (https://docs.oracle.com/cd/E26505_01/pdf/E37384.pdf).
Released under the CDDL v1.1 license. There's no original ZFS code present, but
it's only appropriate to pair "snooze" with "cuddle." | zzzfs | /zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/README | README |
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
def validate_component_name(component_name, allow_slashes=False):
'''Check that component name starts with an alphanumeric character, and
    disallow all non-alphanumeric characters except underscore, hyphen, colon,
and period in component names.
'''
allowed = ('_', '-', ':', '.')
if allow_slashes:
allowed += ('/',)
if len(component_name) == 0:
return False
if not component_name[0].isalnum():
return False
for c in component_name:
if c not in allowed and not c.isalnum():
return False
return True
class ZzzFSException(Exception):
pass
class PropertyList(object):
# Numeric columns are right-aligned when tabulated.
numeric_types = ['alloc', 'avail', 'cap', 'free', 'refer', 'size', 'used']
# synonymous field names
shorthand = {'available': 'avail', 'capacity': 'cap'}
def __str__(self):
return ','.join(self.items)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, str(self))
def __init__(self, user_string):
self.items = user_string.split(',')
def validate_against(self, acceptable):
# compare against a set of acceptable fields
for col in self.names:
if col not in acceptable:
raise ZzzFSException('%s: unrecognized property name' % col)
@property
def names(self):
# use shorthand name, if any, as canonical name
for col in self.items:
yield self.shorthand.get(col, col)
@property
def types(self):
# strings unless explicitly numeric
for col in self.names:
if col in self.numeric_types:
yield int
else:
yield str
class PropertyAssignment(object):
    '''A property=value command-line argument, as used in the zzzfs set command.'''
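    # e.g. PropertyAssignment('compression=on') has key 'compression', val 'on'.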
def __init__(self, user_string):
try:
self.key, self.val = user_string.split('=')
except ValueError:
raise ZzzFSException(
'%r: invalid property=value format' % user_string)
if not validate_component_name(self.key):
raise ZzzFSException('%s: invalid property' % self.key)
self.user_string = user_string
def __str__(self):
return self.user_string
def tabulated(data, headers, scriptable_mode=False, sort_asc=[], sort_desc=[]):
'''Generates a printable table as a string given data (an array of dicts)
and an array of field names for headers.
'''
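    # Example:
    #   tabulated([{'name': 'mypool', 'size': '10'}], PropertyList('name,size'))
    # returns an upper-cased header row plus one data row, with the numeric
    # 'size' column right-aligned.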
if len(data) == 0:
return ''
types = list(headers.types)
names = list(headers.names)
row_format = '\t'.join('%s' for i in range(len(names)))
if not scriptable_mode:
# For evenly-spaced columns, left-align each text field (right-align
# each numeric field) in a cell that's big enough for the longest value
# in each column.
data_and_headers = data + [dict(zip(names, names))]
cells = []
for i in range(len(names)):
box_width = max(len(r.get(names[i]) or '-') for r in data_and_headers)
if types[i] == str:
box_width *= -1 # negative field width means left-align
cells.append('%%%ds' % box_width)
row_format = '\t'.join(cells)
# sort by specified fields, if any
for field in sort_asc + sort_desc:
if field not in names:
raise ZzzFSException('%s: no such column' % field)
for field in sort_asc:
data = sorted(data, key=lambda row: row[field])
for field in sort_desc:
data = list(reversed(sorted(data, key=lambda row: row[field])))
# Add individual data rows.
output = '\n'.join(
row_format % tuple(row.get(names[i]) or '-' for i in range(len(names)))
for row in data)
# Prepend header row in all caps.
if not scriptable_mode:
output = row_format % tuple(h.upper() for h in names) + '\n' + output
return output | zzzfs | /zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/util.py | util.py |
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
#
# ZzzFS strucutre:
#
# <ZZZFS_ROOT>/
# <pool_name>/
# data -> <disk>
# properties/
# filesystems/
# <fs_name>/
# data -> ../data/<fs_name>/
# properties/
# snapshots/
# <snapshot_name>/
# data/
# properties/
# [...]
# <fs_name>%<sub_fs_name>/
# data -> ../data/<fs_name>/<sub_fs_name>/
# properties/
# snapshots/
# [...]
# [...]
import io
import os
import csv
import pwd
import gzip
import time
import shutil
import logging
import tarfile
import datetime
import platform
from libzzzfs.util import validate_component_name, ZzzFSException
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
ZZZFS_DEFAULT_ROOT = os.path.expanduser('~/.zzzfs')
def get_dataset_by(dataset_name, should_be=None, should_exist=True):
'''Handle user-specified dataset name, returning a Filesystem or Snapshot
based on the name. If should_be is specified, an exception is raised if the
dataset is not an instance of the specified class. If should_exist is
False/True, an exception is raised if the dataset does/does not already
exist; no check is performed if should_exist is None.
'''
# validate dataset identifier
filesystem_name = dataset_name
snapshot_name = None
# distinguish between "fs_name" and "fs_name@snapshot"
if dataset_name.count('@') == 1:
filesystem_name, snapshot_name = dataset_name.split('@', 1)
if not validate_component_name(filesystem_name, allow_slashes=True):
raise ZzzFSException('%s: invalid dataset identifier' % dataset_name)
obj = Filesystem(dataset_name)
if snapshot_name:
if not validate_component_name(snapshot_name):
raise ZzzFSException('%s: invalid snapshot name' % snapshot_name)
obj = Snapshot(filesystem_name, snapshot_name)
if should_be:
if not isinstance(obj, should_be):
raise ZzzFSException(
'%s: not a %s' % (dataset_name, should_be.__name__.lower()))
if should_exist and not obj.exists():
raise ZzzFSException('%s: no such dataset' % dataset_name)
if obj.exists() and should_exist == False:
raise ZzzFSException('%s: dataset exists' % dataset_name)
# pool should exist, even if dataset itself shouldn't
#logger.debug('%s, in pool %s', obj, obj.pool)
if not obj.pool.exists():
raise ZzzFSException('%s: no such pool' % obj.pool.name)
return obj
def get_all_datasets(identifiers, types, recursive, max_depth):
'''Get all datasets matching the given identifier names and dataset types,
and optionally all or a generational subset of their descendants.
'''
types.validate_against(['all', 'filesystem', 'snapshot', 'snap'])
# start with set of all filesystems and snapshots
filesystems = [f for p in Pool.all() for f in p.get_filesystems()]
snapshots = [s for f in filesystems for s in f.get_snapshots()]
datasets = filesystems + snapshots
# filter to specific identifiers if requested
if identifiers:
datasets = [get_dataset_by(i) for i in identifiers]
# add children of specified identifiers, if requested
if recursive or max_depth:
children = []
for d in datasets:
if isinstance(d, Filesystem):
children += d.get_children(max_depth)
datasets += children
# add any snapshots of identifiers and their descendants
# it's safe to modify the list as we iterate, because we're only adding
# snapshots, not filesystems
for d in datasets:
if isinstance(d, Filesystem):
datasets += d.get_snapshots()
# filter out filesystems, if not requested
if not any(t in ('all', 'filesystem') for t in types.items):
datasets = [d for d in datasets if not isinstance(d, Filesystem)]
# filter out snapshots, if not requested
if not any(t in ('all', 'snapshot', 'snap') for t in types.items):
datasets = [d for d in datasets if not isinstance(d, Snapshot)]
return datasets
class Dataset(object):
'''Base class for Pool, Filesystem, and Snapshot. Contains methods that
apply to all three objects.
'''
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.name)
@property
def properties(self):
return os.path.join(self.root, 'properties')
@property
def data(self):
return os.path.join(self.root, 'data')
@property
def base_attrs(self):
return {'name': self.name}
@property
def creation(self):
# On POSIX systems, ctime is metadata change time, not file creation
# time, but these should be the same value for our dataset roots.
try:
return time.ctime(os.path.getctime(self.root))
except OSError: # dataset is currently being destroyed, perhaps
return None
def get_parent(self):
if '/' in self.name:
return Filesystem(self.name.rsplit('/', 1)[-2])
return Pool(self.name)
def get_local_properties(self):
attrs = self.base_attrs
try:
keys = os.listdir(self.properties)
except OSError:
# no local attributes
return attrs
for key in keys:
with open(os.path.join(self.properties, key), 'r') as f:
attrs[key] = f.read()
#logger.debug('%s local attributes: %s', self.name, attrs)
return attrs
def get_inherited_properties(self):
attrs = {}
local_attrs = self.get_local_properties()
# inherit values for any attributes not overridden locally, bottom-up
parent = self
while parent.get_parent():
parent = parent.get_parent()
for key, val in parent.get_local_properties().items():
if key not in attrs and key not in local_attrs:
attrs[key] = val
return attrs
def add_local_property(self, key, val):
if not os.path.exists(self.properties):
os.makedirs(self.properties)
if '/' in key:
raise ZzzFSException('%s: invalid property' % key)
with open(os.path.join(self.properties, key), 'w') as f:
f.write(val)
def get_property_and_source(self, key):
local = self.get_local_properties()
if key in local:
return (local[key], 'local')
inherited = self.get_inherited_properties()
if key in inherited:
return (inherited[key], 'inherited')
# property not found
return (None, None)
def get_property(self, key):
val, _ = self.get_property_and_source(key)
return val
def remove_local_property(self, key):
if self.get_property_and_source(key)[1] == 'local':
os.remove(os.path.join(self.properties, key))
return True
else:
# property did not exist, or is not local
return False
class Pool(Dataset):
def __init__(self, name, should_exist=None):
self.name = name
zzzfs_root = os.environ.get('ZZZFS_ROOT', ZZZFS_DEFAULT_ROOT)
if not os.path.exists(zzzfs_root):
os.makedirs(zzzfs_root)
self.root = os.path.join(zzzfs_root, self.name)
self.filesystems = os.path.join(self.root, 'filesystems')
self.history = os.path.join(self.root, 'history')
if should_exist and not self.exists():
raise ZzzFSException('%s: no such pool' % self.name)
if should_exist == False and self.exists():
raise ZzzFSException('%s: pool exists' % self.name)
def get_parent(self):
# pool is the top-most desendent of any dataset
return None
@classmethod
def all(self):
# return an array of all Pool objects
try:
return [Pool(name) for name in os.listdir(
os.environ.get('ZZZFS_ROOT', ZZZFS_DEFAULT_ROOT))]
except OSError:
# zzzfs_root doesn't exist, so no pools have been created
return []
def exists(self):
return self.name in os.listdir(
os.environ.get('ZZZFS_ROOT', ZZZFS_DEFAULT_ROOT))
def create(self, disk):
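        # The pool's data lives at <disk>/<pool_name>; the pool root under
        # ZZZFS_ROOT symlinks to it, and an initial root filesystem is created.
        # A non-empty disk directory is refused.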
if os.path.exists(disk) and len(os.listdir(disk)) != 0:
raise ZzzFSException('%s: disk in use' % self.name)
os.makedirs(self.root)
pool_target = os.path.join(os.path.abspath(disk), self.name)
os.makedirs(pool_target)
os.symlink(pool_target, self.data)
# create initial root filesystem for this pool
Filesystem(self.name).create()
def destroy(self):
if os.path.exists(os.path.realpath(self.data)):
shutil.rmtree(os.path.realpath(self.data))
shutil.rmtree(self.root)
def get_filesystems(self):
try:
fs = os.listdir(self.filesystems)
except OSError: # dataset is currently being destroyed, perhaps
return
for x in fs:
# unescape slashes when instantiating Filesystem object
yield Filesystem(x.replace('%', '/'))
def get_history(self, long_format=False):
try:
with open(self.history, 'r') as f:
history = csv.reader(f)
for (date, command, user, host) in history:
if long_format:
yield '%s %s [user %s on %s]' % (date, command, user, host)
else:
yield '%s %s' % (date, command)
except IOError:
# no logged history
pass
def log_history_event(self, argv, date=None, user=None, host=None):
command = ' '.join(argv)
if not date: # default date is now
date = datetime.datetime.now()
if not user: # default user is user executing this script
user = pwd.getpwuid(os.getuid()).pw_name
if not host: # default host is the current platform host
host = platform.node()
with open(self.history, 'a') as f:
history = csv.writer(f)
history.writerow(
[date.strftime('%Y-%m-%d.%H:%M:%S'), command, user, host])
class Filesystem(Dataset):
def __init__(self, filesystem):
# need to escape slashes to use filesystem name as file name
self.name = filesystem
self.safe_name = self.name.replace('/', '%')
# get pool name by walking up tree
obj = self
while obj.get_parent():
obj = obj.get_parent()
self.pool = Pool(obj.name)
self.poolless_name = self.name[len(self.pool.name)+1:]
self.root = os.path.join(self.pool.root, 'filesystems', self.safe_name)
self.snapshots = os.path.join(self.root, 'snapshots')
@property
def mountpoint(self):
# before the filesystem is created, the symlink doesn't resolve, so
        # this is a property that recomputes the value whenever it is accessed
try:
return os.path.realpath(self.data)
except OSError: # dataset is currently being destroyed, perhaps
return None
@property
def base_attrs(self):
data = super(Filesystem, self).base_attrs
data['mountpoint'] = self.mountpoint
data['creation'] = self.creation
return data
def exists(self):
return os.path.exists(self.root)
def get_children(self, max_depth=0): # 0 = all descendants
children = [
f for f in self.pool.get_filesystems()
if f.name.startswith(self.name + '/')]
#logger.debug('%s children: %s', self, children)
if max_depth > 0:
# use number of slashes to count depth
depth = max_depth + self.name.count('/')
children = [f for f in children if f.name.count('/') <= depth]
return children
def get_snapshots(self):
try:
snaps = os.listdir(self.snapshots)
except OSError: # dataset is currently being destroyed, perhaps
return
for x in snaps:
yield Snapshot(self.name, x)
def create(self, create_parents=False, from_stream=None):
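        # Create the filesystem's metadata directories and symlink its data
        # directory into the pool's data tree; with from_stream, populate it
        # from a 'zzzfs send' stream (a gzipped tar of a snapshot).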
if not self.get_parent().exists():
if create_parents:
#logger.debug('%s: need to create %s', self, self.get_parent())
self.get_parent().create(create_parents=True)
else:
raise ZzzFSException(
'%s: parent filesystem missing' % self.name)
# create relative symlink into pool data
target = os.path.join('..', '..', 'data', self.poolless_name)
try:
os.makedirs(os.path.join(self.root, target))
except OSError:
# already exists
pass
os.symlink(target, self.data)
os.makedirs(self.properties)
os.makedirs(self.snapshots)
#logger.debug('%s: pointed %s at %s', self, self.data, target)
if from_stream:
# for receive command: inverse of Snapshot.to_stream
try:
# gzip needs a seekable object, not a stream
                # XXX this entails fitting the entire snapshot into memory
buf = io.BytesIO(from_stream.read())
buf.seek(0)
with gzip.GzipFile(fileobj=buf) as g:
with tarfile.TarFile(fileobj=g) as t:
#logger.debug('files in stream: %s', t.getnames())
# extract into snapshots directory
t.extractall(self.snapshots)
# "rollback" filesystem to snapshot just received
self.rollback_to(
Snapshot(self.name, os.listdir(self.snapshots)[0]))
except Exception as e:
# if anything goes wrong, destroy target filesystem and exit
self.destroy()
raise ZzzFSException(e)
#logger.debug(
# 'after creating %s, filesystems in %s: %s', self, self.pool,
# self.pool.get_filesystems())
def destroy(self, recursive=False):
dependencies = [
f for f in self.pool.get_filesystems()
if f.name.startswith(self.name + '/')]
#logger.debug('%s dependencies: %s', self, dependencies)
if len(dependencies) > 0 and not recursive:
raise ZzzFSException(
'cannot destroy %r: filesystem has children\n'
'use \'-r\' to destroy the following datasets:\n'
'%s' % (self.name, '\n'.join(f.name for f in dependencies)))
# user may have already deleted data
if os.path.exists(self.mountpoint):
shutil.rmtree(self.mountpoint)
shutil.rmtree(self.root)
# delete any child filesystems
for f in dependencies:
f.destroy(recursive)
def rollback_to(self, snapshot):
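        # Replace the filesystem's current contents and local properties with
        # copies taken from the snapshot.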
shutil.rmtree(self.mountpoint)
shutil.copytree(snapshot.data, self.mountpoint)
# restore any local properties
if os.path.exists(snapshot.properties):
shutil.rmtree(self.properties)
shutil.copytree(snapshot.properties, self.properties)
def rename(self, new_dataset):
# re-create relative symlink into pool data
target = os.path.join('..', '..', 'data', new_dataset.poolless_name)
try:
os.makedirs(os.path.join(new_dataset.root, target))
except OSError:
# already exists
pass
# move each component individually
os.symlink(target, new_dataset.data)
# shutil.move treats destination as parent if it is a directory
#logger.debug(
# '%s: %s -> %s', self, self.mountpoint, new_dataset.mountpoint)
os.rmdir(new_dataset.mountpoint)
shutil.move(self.mountpoint, new_dataset.mountpoint)
shutil.move(self.properties, new_dataset.root)
shutil.move(self.snapshots, new_dataset.root)
# all data has been moved
self.destroy()
class Snapshot(Dataset):
def __init__(self, filesystem, snapshot):
self.filesystem = Filesystem(filesystem)
self.name = snapshot
self.full_name = '%s@%s' % (filesystem, snapshot)
self.root = os.path.join(self.filesystem.root, 'snapshots', self.name)
self.pool = self.filesystem.pool
@property
def base_attrs(self):
data = super(Snapshot, self).base_attrs
data['name'] = self.full_name
data['creation'] = self.creation
return data
def exists(self):
return os.path.exists(self.root)
def create(self):
os.makedirs(self.root)
shutil.copytree(self.filesystem.data, self.data)
if os.path.exists(self.filesystem.properties):
shutil.copytree(self.filesystem.properties, self.properties)
else:
# no local properties associated with current working filesystem;
# use an empty directory for the snapshot's filesystem
os.makedirs(self.properties)
def rename(self, new_snapshot):
os.rename(self.root, new_snapshot.root)
def clone_to(self, new_filesystem):
new_filesystem.create()
#logger.debug('%s: cloning to %s', self, new_filesystem.mountpoint)
# remove folders to be replaced by copytree
#logger.debug(
# '%s: %s -> %s', self, self.data, new_filesystem.mountpoint)
os.rmdir(new_filesystem.mountpoint)
os.rmdir(new_filesystem.properties)
shutil.copytree(self.data, new_filesystem.mountpoint)
shutil.copytree(self.properties, new_filesystem.properties)
def to_stream(self, stream):
# write a gzipped tar of the snapshot to the stream
with gzip.GzipFile(fileobj=stream, mode='w') as g:
with tarfile.open(fileobj=g, mode='w') as t:
t.add(self.root, arcname=self.name) | zzzfs | /zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/dataset.py | dataset.py |
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
import os
import sys
import shutil
import filecmp
from libzzzfs.dataset import (
get_all_datasets, get_dataset_by, Filesystem, Pool, Snapshot)
from libzzzfs.util import tabulated, validate_component_name, ZzzFSException
# Each method returns a string to be written to stdout, or a dataset (or list
# of datasets) affected by the command.
def clone(snapshot, filesystem):
'''Turn a snapshot into a filesystem with a new name.'''
dataset1 = get_dataset_by(snapshot, should_be=Snapshot)
dataset2 = get_dataset_by(
filesystem, should_be=Filesystem, should_exist=False)
dataset1.clone_to(dataset2)
dataset2.add_local_property('origin', dataset1.full_name)
return [dataset1, dataset2]
def create(filesystem, create_parents, properties):
'''Create a filesystem.'''
dataset = get_dataset_by(
filesystem, should_be=Filesystem, should_exist=False)
dataset.create(create_parents)
for keyval in properties:
dataset.add_local_property(keyval.key, keyval.val)
return dataset
def destroy(filesystem, recursive):
'''Remove a filesystem.'''
dataset = get_dataset_by(filesystem, should_be=Filesystem)
dataset.destroy(recursive)
return dataset
def diff(identifier, other_identifier):
'''Diff a snapshot against another snapshot in the same filesystem, or
against the current working filesystem.
'''
dataset1 = get_dataset_by(identifier, should_be=Snapshot)
if other_identifier is not None:
dataset2 = get_dataset_by(other_identifier)
else:
# compare against current version of filesystem
dataset2 = dataset1.filesystem
    # real ZFS can't diff snapshots in different filesystems; not so in ZzzFS
#if isinstance(dataset2, Filesystem) and (
# dataset1.filesystem.name != dataset2.filesystem.name):
# raise ZzzFSException(
# '%s: cannot compare to a different filesystem' % identifier)
output = []
def do_diff(dcmp):
# trim off pool root from diff output
base_path = dcmp.left[len(dataset1.data)+1:]
for name in dcmp.diff_files:
output.append('M\t%s' % os.path.join(base_path, name))
for name in dcmp.left_only:
output.append('-\t%s' % os.path.join(base_path, name))
for name in dcmp.right_only:
output.append('+\t%s' % os.path.join(base_path, name))
for sub_dcmp in dcmp.subdirs.values():
do_diff(sub_dcmp)
do_diff(filecmp.dircmp(dataset1.data, dataset2.data))
return '\n'.join(output)
def get(properties, identifiers, headers, sources, scriptable_mode, recursive,
max_depth, types):
'''Get a set of properties for a set of datasets.'''
all_headers = ['name', 'property', 'value', 'source']
if headers.items == ['all']:
headers.items = all_headers
headers.validate_against(all_headers)
sources.validate_against(['local', 'inherited'])
attrs = []
for dataset in get_all_datasets(identifiers, types, recursive, max_depth):
if properties.items == ['all']:
if 'local' in sources.items:
for key, val in dataset.get_local_properties().items():
attrs.append({
'name': dataset.name, 'property': key, 'value': val,
'source': 'local'})
if 'inherited' in sources.items:
for key, val in dataset.get_inherited_properties().items():
attrs.append({
'name': dataset.name, 'property': key, 'value': val,
'source': 'inherited'})
else:
for p in properties.items:
val, source = dataset.get_property_and_source(p)
if source in sources.items:
attrs.append({
'name': dataset.name, 'property': p, 'value': val,
'source': source})
return tabulated(attrs, headers, scriptable_mode)
def inherit(property, identifiers):
'''Remove a local property from a set of datasets.'''
if not validate_component_name(property):
raise ZzzFSException('%s: invalid property' % property)
datasets = [get_dataset_by(identifier) for identifier in identifiers]
for dataset in datasets:
try:
os.remove(os.path.join(dataset.properties, property))
except OSError:
# property was not set locally
pass
return datasets
def list(identifiers, types, scriptable_mode, headers, recursive, max_depth,
sort_asc, sort_desc):
'''Tabulate a set of properties for a set of datasets.'''
records = []
for d in get_all_datasets(identifiers, types, recursive, max_depth):
records.append(dict((h, d.get_property(h)) for h in headers.names))
return tabulated(records, headers, scriptable_mode, sort_asc, sort_desc)
def promote(clone_filesystem):
'''Turn a cloned snapshot into a standalone filesystem.'''
# Since there are no actual dependencies in ZzzFS, simply unset 'origin'.
dataset = get_dataset_by(
clone_filesystem, should_be=Filesystem, should_exist=True)
dataset.remove_local_property('origin')
return dataset
def receive(filesystem, stream=sys.stdin):
    '''Create a new filesystem pre-populated with the contents of a snapshot
sent via zzzfs send piped through stdin.
'''
dataset = get_dataset_by(
filesystem, should_be=Filesystem, should_exist=False)
dataset.create(from_stream=stream)
return dataset
def rename(identifier, other_identifier):
'''Move or rename the dataset.'''
dataset1 = get_dataset_by(identifier)
dataset2 = None # may be filesystem or snapshot, will check below
if isinstance(dataset1, Snapshot):
        if '@' not in other_identifier:
# second argument might be snapshot alone, which we'd interpret as
# a filesystem; e.g. "rename fs@snapshot new_snapshot"
other_identifier = '%s@%s' % (
dataset1.filesystem.name, other_identifier)
# re-identify with should_exist
dataset2 = get_dataset_by(
other_identifier, should_be=Snapshot, should_exist=False)
# both snapshots
if dataset1.filesystem.name != dataset2.filesystem.name:
raise ZzzFSException('mismatched filesystems')
else: # dataset1 is a filesystem
dataset2 = get_dataset_by(
other_identifier, should_be=Filesystem, should_exist=False)
if dataset1.pool.name != dataset2.pool.name:
raise ZzzFSException('cannot rename to different pool')
# same procedure whether filesystem or snapshot
dataset1.rename(dataset2)
return [dataset1, dataset2]
def rollback(snapshot):
    '''Replace the filesystem with the contents of the specified snapshot.'''
dataset = get_dataset_by(snapshot, should_be=Snapshot)
dataset.filesystem.rollback_to(dataset)
return dataset
def send(snapshot, stream=sys.stdout):
    '''Create a gzipped tarball of a snapshot and write it to stdout.'''
dataset = get_dataset_by(snapshot, should_be=Snapshot)
dataset.to_stream(stream)
return dataset
def set(keyval, identifiers):
'''Set a property value for a set of datasets.'''
datasets = [get_dataset_by(identifier) for identifier in identifiers]
for dataset in datasets:
dataset.add_local_property(keyval.key, keyval.val)
return datasets
def snapshot(snapshots, properties):
    '''Create snapshots of one or more filesystems.'''
for i in snapshots:
dataset = get_dataset_by(i, should_be=Snapshot, should_exist=False)
if not dataset.filesystem.exists():
raise ZzzFSException(
'%s: no such filesystem' % dataset.filesystem.name)
dataset.create()
for keyval in properties:
dataset.add_local_property(keyval.key, keyval.val)
yield dataset | zzzfs | /zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/zfs.py | zfs.py |
# Copyright (c) 2015 Daniel W. Steinbrook. All rights reserved.
import argparse
from libzzzfs.util import PropertyAssignment, PropertyList
class CommandInterpreter(object):
'''Base class for ZzzfsCommandInterpreter/ZzzpoolCommandInterpreter'''
def __init__(self, argv):
self.parser = argparse.ArgumentParser()
self.interpret()
        self.args = self.parser.parse_args(argv)
        # generate dict of argument keys/values (the subcommand name itself is
        # dropped below, leaving only the per-command parameters)
        self.params = dict(self.args._get_kwargs())
del self.params['command']
class ZzzfsCommandInterpreter(CommandInterpreter):
def interpret(self):
subparsers = self.parser.add_subparsers(
dest='command', title='subcommands')
# per-command arguments
clone = subparsers.add_parser(
'clone', help='turn a snapshot into a filesystem with a new name')
clone.add_argument('snapshot')
clone.add_argument('filesystem')
create = subparsers.add_parser('create', help='create a filesystem')
create.add_argument('filesystem')
create.add_argument(
'-p', action='store_true', dest='create_parents',
help='create missing parent filesystems')
create.add_argument(
'-o', metavar='property=value', action='append', dest='properties',
default=[], type=PropertyAssignment,
help='set the specified property')
destroy = subparsers.add_parser('destroy', help='destroy a filesystem')
destroy.add_argument('filesystem')
destroy.add_argument(
'-r', action='store_true', dest='recursive',
help='destroy child filesystems')
diff = subparsers.add_parser(
'diff', help='compare filesystem/snapshot against a snapshot')
diff.add_argument('identifier', metavar='snapshot')
diff.add_argument(
'other_identifier', metavar='snapshot|filesystem', nargs='?')
get = subparsers.add_parser('get', help='get dataset properties')
recursive_or_depth = get.add_mutually_exclusive_group()
recursive_or_depth.add_argument(
'-r', action='store_true', dest='recursive',
help='display all children')
recursive_or_depth.add_argument(
'-d', metavar='depth', type=int, dest='max_depth', default=0,
help='number of child generations to display')
get.add_argument(
'properties', metavar='all | property[,property...]',
type=PropertyList, help='comma-separated list of properties')
get.add_argument(
'identifiers', metavar='filesystem|snapshot', nargs='+')
get.add_argument(
'-H', action='store_true', dest='scriptable_mode',
help='scripted mode (no headers, tab-delimited)')
get.add_argument(
'-o', metavar='all | field[,field...]', type=PropertyList,
default=PropertyList('all'), dest='headers',
help='comma-separated list of fields (name, property, value, source)')
get.add_argument(
'-t', metavar='type[,type...]', dest='types', type=PropertyList,
default=PropertyList('filesystem'),
help='comma-separated list of types (all, filesystem, snapshot)')
get.add_argument(
'-s', metavar='source[,source...]', type=PropertyList,
dest='sources', default=PropertyList('local,inherited'),
help='comma-separated list of sources (local, inherited)')
inherit = subparsers.add_parser(
'inherit', help='unset a property from datasets')
inherit.add_argument('property')
inherit.add_argument(
'identifiers', metavar='filesystem|snapshot', nargs='+')
list_ = subparsers.add_parser('list', help='list datasets')
recursive_or_depth = list_.add_mutually_exclusive_group()
recursive_or_depth.add_argument(
'-r', action='store_true', dest='recursive',
help='display all children')
recursive_or_depth.add_argument(
'-d', metavar='depth', type=int, dest='max_depth', default=0,
help='number of child generations to display')
list_.add_argument(
'-H', action='store_true', dest='scriptable_mode',
help='scripted mode (no headers, tab-delimited)')
list_.add_argument(
'-o', metavar='property[,property...]', dest='headers',
type=PropertyList, help='comma-separated list of properties',
default=PropertyList('name,used,available,refer,mountpoint'))
list_.add_argument(
'-t', metavar='type[,type...]', dest='types', type=PropertyList,
default=PropertyList('filesystem'),
help='comma-separated list of types (all, filesystem, snapshot)')
list_.add_argument(
'-s', metavar='property', dest='sort_asc', action='append',
default=[], help='sort by property (ascending)')
list_.add_argument(
'-S', metavar='property', dest='sort_desc', action='append',
default=[], help='sort by property (descending)')
list_.add_argument(
'identifiers', metavar='filesystem|snapshot', nargs='*')
promote = subparsers.add_parser(
'promote',
help='turn a cloned snapshot into a standalone filesystem')
promote.add_argument('clone_filesystem')
receive = subparsers.add_parser(
'receive', help='create a new filesystem from "zzzfs send" output')
receive.add_argument('filesystem')
rename = subparsers.add_parser(
'rename', help='move or rename a dataset')
rename.add_argument('identifier', metavar='filesystem|snapshot')
rename.add_argument('other_identifier', metavar='filesystem|snapshot')
rollback = subparsers.add_parser(
'rollback', help='replace a filesystem with a snapshot')
rollback.add_argument('snapshot')
send = subparsers.add_parser(
'send', help='serialize snapshot into a data stream')
send.add_argument('snapshot')
set_ = subparsers.add_parser(
'set', help='set a property value for a dataset')
set_.add_argument(
'keyval', metavar='property=value', type=PropertyAssignment)
set_.add_argument(
'identifiers', metavar='filesystem|snapshot', nargs='+')
snap = subparsers.add_parser(
'snapshot', help='create snapshots of filesystems')
snap.add_argument('snapshots', metavar='filesystem@snapname', nargs='+')
snap.add_argument(
'-o', metavar='property=value', action='append', dest='properties',
default=[], type=PropertyAssignment,
help='set the specified property')
class ZzzpoolCommandInterpreter(CommandInterpreter):
def interpret(self):
subparsers = self.parser.add_subparsers(
dest='command', title='subcommands')
# per-command arguments
create = subparsers.add_parser('create', help='create a pool')
create.add_argument('pool_name', metavar='pool', help='pool name')
create.add_argument('disk', help='directory in which to create pool')
destroy = subparsers.add_parser('destroy', help='destroy a pool')
destroy.add_argument('pool_name', metavar='pool', help='pool name')
history = subparsers.add_parser(
'history', help='display pool command history')
history.add_argument(
'pool_names', metavar='pool', nargs='*', default=[],
help='pool name')
history.add_argument(
'-l', action='store_true', dest='long_format',
help='show log records in long format')
list_ = subparsers.add_parser('list', help='list pools and properties')
list_.add_argument(
'pool_name', nargs='?', default=None, help='pool name')
list_.add_argument(
'-H', action='store_true', dest='scriptable_mode',
help='scripted mode (no headers, tab-delimited)')
list_.add_argument(
'-o', metavar='property[,...]', type=PropertyList, dest='headers',
default=PropertyList('name,size,alloc,free,cap,health,altroot'),
help='comma-separated list of properties') | zzzfs | /zzzfs-0.1.2.tar.gz/zzzfs-0.1.2/libzzzfs/interpreter.py | interpreter.py |
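

if __name__ == '__main__':
    # Usage sketch (illustrative, not part of the original module): parse a
    # sample "zzzfs list" command line and inspect the parsed arguments.
    cli = ZzzfsCommandInterpreter(['list', '-H', 'tank/home'])
    print(cli.args.command)  # 'list'
    print(cli.params)        # {'scriptable_mode': True, 'identifiers': ['tank/home'], ...}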
Python utils - Time
===================

Convert a time value supplied in any of several common formats into the
desired output format, optionally shifted by a delay, without having to
specify the input type.
Usage
---------------

::

    def get_time(ts=None, delay=0, fmt=19)

Arguments: ``[original_time] [delay] [output_fmt:{0,6,8,10,16,17,19}]``

output_fmt::

    19: '%Y-%m-%d %H:%M:%S'
     8: '%Y%m%d'
     6: '%Y%m'
    10: '%Y-%m-%d'
    16: '%Y-%m-%d %H:%M'
    17: '%Y%m%d-%H:%M:%S'

Input format (any one of them)
------------------------------

::

    201809
    20180910
    2018-09-10
    2018-09-10 18:00
    20180910-18:00:00
    2018-09-10 18:00:00
    1536573600 (int)
    1536573600.00 (float)

Return format (any one of them)
-------------------------------

::

    201809
    20180910
    2018-09-10
    2018-09-10 18:00
    20180910-18:00:00
    2018-09-10 18:00:00
    1536573600 (int)
| zzzutils | /zzzutils-0.1.7.tar.gz/zzzutils-0.1.7/README.rst | README.rst |
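Example
-------

A minimal usage sketch. The import path, the unit of ``delay``, and the exact
return value of each call are assumptions based on this README, not verified
against the package source::

    from zzzutils import get_time  # assumed import path

    get_time()                                      # now, e.g. '2018-09-10 18:00:00'
    get_time(1536573600, fmt=10)                    # '2018-09-10'
    get_time('20180910-18:00:00', fmt=8)            # '20180910'
    get_time('2018-09-10 18:00', delay=60, fmt=16)  # delay assumed to be in seconds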