commit 0c7fff1116 (parent 5163bf819c)
Author: 赵敬皓
Date: 2024-12-25 12:18:49 +08:00

10 changed files with 21413 additions and 0 deletions

Dockerfile (Normal file, 9 additions)

@@ -0,0 +1,9 @@
FROM pytorch/pytorch:2.2.0-cuda11.8-cudnn8-runtime
WORKDIR /app
# Copy requirements.txt alone first so the pip install layer is cached when only code changes
COPY requirements.txt /app/
RUN pip install --upgrade pip -i https://pypi.tuna.tsinghua.edu.cn/simple --no-cache-dir
RUN pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple --no-cache-dir
COPY . /app/
CMD ["python3", "run.py"]

local_encoder.py (Normal file, 29 additions)

@@ -0,0 +1,29 @@
from transformers import AutoTokenizer, AutoModel
import torch


def load_model(path):
    tokenizer = AutoTokenizer.from_pretrained(path)
    model = AutoModel.from_pretrained(path)
    model.eval()
    return tokenizer, model


def embedding(tokenizer, model, sentences):
    """Encode a list of sentences into normalized embedding vectors.

    Args:
        tokenizer: tokenizer returned by load_model
        model: embedding model returned by load_model
        sentences: list of sentences to encode

    Returns:
        list of 1024-dimensional vectors, one per input sentence
    """
    # max_length=512 matches the model's max_position_embeddings; the shipped
    # tokenizer_config sets an unbounded model_max_length, so a bare
    # truncation=True would not actually clip long inputs.
    encoded_input = tokenizer(sentences, padding=True, truncation=True,
                              max_length=512, return_tensors='pt')
    with torch.no_grad():
        model_output = model(**encoded_input)
    # CLS pooling: take the hidden state of the first token.
    sentence_embeddings = model_output[0][:, 0]
    # L2-normalize so that dot product equals cosine similarity.
    sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)
    return sentence_embeddings.cpu().numpy().tolist()
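A minimal usage sketch (the model path is hypothetical; the exported config below points at a local BAAI_bge-large-zh cache):

tokenizer, model = load_model("./bge-large-zh")  # hypothetical local path
vectors = embedding(tokenizer, model, ["今天天气不错", "The weather is nice today"])
print(len(vectors), len(vectors[0]))  # expected: 2 1024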

config.json (Normal file, 40 additions)

@@ -0,0 +1,40 @@
{
"_name_or_path": "/root/.cache/torch/sentence_transformers/BAAI_bge-large-zh/",
"architectures": [
"BertModel"
],
"attention_probs_dropout_prob": 0.1,
"bos_token_id": 0,
"classifier_dropout": null,
"directionality": "bidi",
"eos_token_id": 2,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 1024,
"id2label": {
"0": "LABEL_0"
},
"initializer_range": 0.02,
"intermediate_size": 4096,
"label2id": {
"LABEL_0": 0
},
"layer_norm_eps": 1e-12,
"max_position_embeddings": 512,
"model_type": "bert",
"num_attention_heads": 16,
"num_hidden_layers": 24,
"output_past": true,
"pad_token_id": 0,
"pooler_fc_size": 768,
"pooler_num_attention_heads": 12,
"pooler_num_fc_layers": 3,
"pooler_size_per_head": 128,
"pooler_type": "first_token_transform",
"position_embedding_type": "absolute",
"torch_dtype": "float32",
"transformers_version": "4.30.0",
"type_vocab_size": 2,
"use_cache": true,
"vocab_size": 21128
}
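This is the backbone config of BAAI/bge-large-zh: a 24-layer BERT with hidden_size 1024, which is why embedding() above returns 1024-dimensional vectors, and with a 512-token position limit. A quick sanity check with transformers' AutoConfig (the directory path is an assumption):

from transformers import AutoConfig

config = AutoConfig.from_pretrained("./bge-large-zh")  # hypothetical directory holding this config.json
print(config.hidden_size, config.num_hidden_layers, config.max_position_embeddings)  # 1024 24 512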

config_sentence_transformers.json (Normal file, 7 additions)

@@ -0,0 +1,7 @@
{
"__version__": {
"sentence_transformers": "2.2.2",
"transformers": "4.28.1",
"pytorch": "1.13.0+cu117"
}
}

modules.json (Normal file, 20 additions)

@@ -0,0 +1,20 @@
[
{
"idx": 0,
"name": "0",
"path": "",
"type": "sentence_transformers.models.Transformer"
},
{
"idx": 1,
"name": "1",
"path": "1_Pooling",
"type": "sentence_transformers.models.Pooling"
},
{
"idx": 2,
"name": "2",
"path": "2_Normalize",
"type": "sentence_transformers.models.Normalize"
}
]
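modules.json declares the sentence-transformers pipeline: a Transformer encoder, a Pooling module (configured in 1_Pooling/config.json, which is not shown in this diff; bge models use CLS pooling), and L2 normalization. That is the same computation embedding() performs by hand, so if sentence-transformers were installed (it is not pinned in requirements.txt), this sketch should yield matching vectors:

from sentence_transformers import SentenceTransformer

model = SentenceTransformer("./bge-large-zh")  # hypothetical directory holding modules.json
emb = model.encode(["今天天气不错"])
print(emb.shape)  # (1, 1024)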

sentence_bert_config.json (Normal file, 4 additions)

@@ -0,0 +1,4 @@
{
"max_seq_length": 512,
"do_lower_case": true
}

special_tokens_map.json (Normal file, 7 additions)

@@ -0,0 +1,7 @@
{
"cls_token": "[CLS]",
"mask_token": "[MASK]",
"pad_token": "[PAD]",
"sep_token": "[SEP]",
"unk_token": "[UNK]"
}

File diff suppressed because it is too large

tokenizer_config.json (Normal file, 15 additions)

@@ -0,0 +1,15 @@
{
"clean_up_tokenization_spaces": true,
"cls_token": "[CLS]",
"do_basic_tokenize": true,
"do_lower_case": true,
"mask_token": "[MASK]",
"model_max_length": 1000000000000000019884624838656,
"never_split": null,
"pad_token": "[PAD]",
"sep_token": "[SEP]",
"strip_accents": null,
"tokenize_chinese_chars": true,
"tokenizer_class": "BertTokenizer",
"unk_token": "[UNK]"
}
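Note that model_max_length above is a huge sentinel value, not 512, so a bare truncation=True does not clip inputs to the model's 512-token window; that is why local_encoder.py passes an explicit max_length=512. A minimal check (model path again an assumption):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./bge-large-zh")  # hypothetical local path
ids = tok("字" * 2000, truncation=True)["input_ids"]
print(len(ids))  # roughly 2002, not 512, unless max_length=512 is passed explicitly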

requirements.txt (Normal file, 4 additions)

@@ -0,0 +1,4 @@
transformers==4.33.0
Flask==3.0.0
numpy==1.23.5
logzero==1.7.0
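requirements.txt pins Flask and logzero, and the Dockerfile's CMD launches run.py, which is not part of this diff. A plausible minimal sketch of that service follows; the endpoint name, payload shape, model path, and port are all assumptions:

from flask import Flask, request, jsonify
from logzero import logger

from local_encoder import load_model, embedding

app = Flask(__name__)
tokenizer, model = load_model("./bge-large-zh")  # hypothetical model path


@app.route("/embed", methods=["POST"])  # hypothetical endpoint
def embed():
    sentences = request.json.get("sentences", [])
    logger.info("encoding %d sentences", len(sentences))
    return jsonify(embedding(tokenizer, model, sentences))


if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)  # port is an assumption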