forked from Argithun/Knowledge-to-Jailbreak
-
Notifications
You must be signed in to change notification settings - Fork 0
/
4_mutate_to_jailbreak.py
90 lines (71 loc) · 3.9 KB
/
4_mutate_to_jailbreak.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
from easyjailbreak.selector.RandomSelector import RandomSelectPolicy
from easyjailbreak.datasets import JailbreakDataset, Instance
from easyjailbreak.seed import SeedTemplate
from easyjailbreak.mutation.generation import Rephrase
from easyjailbreak.models import from_pretrained
from sentence_transformers import SentenceTransformer, util
from easyjailbreak.metrics.Evaluator.Evaluator_ClassificationGetScore import EvaluatorClassificationGetScore
import torch
import json
from transformers import HfArgumentParser, AutoTokenizer
from tools.inference_models import GenerationArguments, get_inference_model
from tools.generate_response import MyGenerationArguments, generate_response, generate_response_light
parser = HfArgumentParser(MyGenerationArguments)
generation_args = parser.parse_args_into_dataclasses()[0]
model = get_inference_model(generation_args)
model_1 = from_pretrained('/data2/tsq/WaterBench/data/models/llama-2-7b-chat-hf',
'llama-2', dtype=torch.bfloat16, max_new_tokens=200)
evaluator = EvaluatorClassificationGetScore(model_1)
# todo 使用 easyjailbreak.mutation.rule/generation 下不同的变异策略进行对比实验
model_2 = from_pretrained('/data3/MODELS/vicuna-7b-v1.5',
'stable-vicuna', dtype=torch.bfloat16, max_new_tokens=500)
mutation = Rephrase(model_2)
havent_jailbreak_list = list()
had_jailbreak_list = list()
with open('./data/3_prompt_and_knowledge.json', 'r', encoding='utf-8') as file:
for line in file:
item = json.loads(line)
prompt_with_knowledge = item['knowledge'][0] + '. ' + item['prompt']
havent_jailbreak_list.append({'prompt': item['prompt'],
'subject': item['subject'],
'knowledge': item['knowledge'][0],
'prompt_with_knowledge': prompt_with_knowledge,
'mutated_prompts': [prompt_with_knowledge],
'mutated_prompt_malicious_scores': []})
times = 5
# 记录每次改写生成的 prompt、分数
while times > 0:
prompts_with_knowledge = [d['mutated_prompts'][-1] for d in havent_jailbreak_list]
responses = generate_response_light(model, prompts_with_knowledge)
for i in range(len(responses)):
score = 0
cnt = 0
for response in responses[i].keys():
cnt += 1
instance = Instance(query=item['prompt'], jailbreak_prompt="", target_responses=[response])
evaluator._evaluate(instance)
score = int(instance.eval_results[-1])
if cnt == 1:
break
# score = score * 1.0 / cnt
# havent_jailbreak_list[i]['mutated_prompts'].append(prompts_with_knowledge[i])
havent_jailbreak_list[i]['mutated_prompt_malicious_scores'].append(score)
for item in havent_jailbreak_list:
if item['mutated_prompt_malicious_scores'][-1] >= 5:
had_jailbreak_list.append(item)
for item in had_jailbreak_list:
if item in havent_jailbreak_list:
havent_jailbreak_list.remove(item)
with open('./data/can_jailbreak_with_knowledge.json', 'w') as json_file:
json.dump(had_jailbreak_list, json_file, ensure_ascii=False, indent=4)
with open('./data/cant_jailbreak_with_knowledge.json', 'w') as json_file:
json.dump(havent_jailbreak_list, json_file, ensure_ascii=False, indent=4)
times -= 1
if times == 0:
break
prompts_with_knowledge = [d['mutated_prompts'][-1] for d in havent_jailbreak_list]
instances = [Instance(jailbreak_prompt=sentence) for sentence in prompts_with_knowledge]
jailbreak_dataset = JailbreakDataset(instances)
result = mutation(jailbreak_dataset)
for i in range(len(prompts_with_knowledge)):
havent_jailbreak_list[i]['mutated_prompts'].append(result[i].jailbreak_prompt)