
Implementation cannot proceed for now; further research is needed

master
Viviman committed 2 weeks ago
commit 2fdc8ca4a5
Changed files:
  main.py (2 changes)
  scripts/train.py (3 changes)

main.py (2 changes)

@@ -25,6 +25,7 @@ pattern = r"(\d{4}\.\d{2})--(\d{4}\.\d{2}|至今)\s*([^\n]+)"
# Find all matches
matches = re.findall(pattern, text)
# Define the labeling rules
def get_label_and_details(content):
    if "学习" in content:
@@ -70,6 +71,7 @@ def get_label_and_details(content):
        details = {}
        return "工作经历", details
# Organize the results
results = []
for match in matches:
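
For reference, a minimal sketch of how this pattern and get_label_and_details are presumably wired together; the sample resume text and the print loop below are illustrative and not part of the commit:

import re

# Date-range pattern from main.py: "YYYY.MM--YYYY.MM" or "YYYY.MM--至今" ("to present"),
# followed by the rest of the line as the entry text.
pattern = r"(\d{4}\.\d{2})--(\d{4}\.\d{2}|至今)\s*([^\n]+)"

# Illustrative resume snippet (not from the repository).
text = "2018.09--2022.06 在某大学学习计算机科学\n2022.07--至今 某公司后端开发工程师"

# re.findall returns one (start, end, content) tuple per matched entry.
matches = re.findall(pattern, text)
for start, end, content in matches:
    # get_label_and_details(content) would then classify each entry,
    # e.g. as "工作经历" (work experience) in the default branch shown above.
    print(start, end, content)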

scripts/train.py (3 changes)

@@ -8,12 +8,14 @@ from transformers import (
    DataCollatorForTokenClassification
)
# Load the data
def load_data(file_path):
    with open(file_path, "r", encoding="utf-8") as f:
        data = json.load(f)
    return data
# Preprocess the data
def tokenize_and_align_labels(examples, tokenizer):
    tokenized_inputs = tokenizer(examples["text"], truncation=True, padding=True, is_split_into_words=True)
@@ -32,6 +34,7 @@ def tokenize_and_align_labels(examples, tokenizer):
    tokenized_inputs["labels"] = labels
    return tokenized_inputs
# Label mapping
label2id = {
    "TIME": 0,
