1 Star 0 Fork 13

Wuming/keentune_brain

forked from anolis/keentune_brain 
加入 Gitee
与超过 1200万 开发者一起发现、参与优秀开源项目,私有仓库也完全免费 :)
免费加入
文件
克隆/下载
test.py 4.38 KB
一键复制 编辑 原始数据 按行查看 历史
# Copyright (c) 2021-2023 Alibaba Cloud Computing Ltd.
# SPDX-License-Identifier: MulanPSL-2.0
import os
import signal
import random
import numpy as np
# --- Test fixtures ---------------------------------------------------------
# Knob definitions covering the three parameter flavors the tuner supports:
# a numeric range, an unordered option set, and an ordered sequence.
TEST_KNOBS = [
    {
        "name": "knobs_a",
        "domain": "sysctl",
        "range": [0, 100],   # continuous integer range with unit step
        "dtype": "int",
        "step": 1,
        "base": 50,
    },
    {
        "name": "knobs_b",
        "domain": "sysctl",
        "options": ["0", "1", "2"],   # unordered categorical choices
        "dtype": "string",
        "base": "0",
    },
    {
        "name": "knobs_c",
        "domain": "sysctl",
        "sequence": ["0", "1", "2"],  # ordered categorical choices
        "dtype": "string",
        "base": "0",
    },
]

# Baseline benchmark result: lower is better ("negative"), non-strict.
TEST_BASELINE = {
    "total": {
        "base": [126.5, 126, 127],
        "negative": True,
        "weight": 100,
        "strict": False,
    }
}

# Canned benchmark feedback returned to the optimizer each round.
TEST_FEEDBACK = {"total": [127.0, 127.5, 127.9]}

# No tuning rules are exercised by these tests.
TEST_RULELIST = []

# Sensitization hyper-parameters.
TEST_EPOCH = 50
TEST_TOPN = 10
TEST_THRESHOLD = 0.8
def testTuning(name, algorithm, iteration, parameters, baseline, rule_list):
if algorithm == "hord":
from brain.algorithm.tuning.hord import HORD
optimizer = HORD(name, iteration, parameters, baseline,rule_list)
if algorithm == "random":
from brain.algorithm.tuning.random import Random
optimizer = Random(name, iteration, parameters, baseline,rule_list)
if algorithm == "lamcts":
from brain.algorithm.tuning.lamcts import LamctsOptim
optimizer = LamctsOptim(name, iteration, parameters, baseline,rule_list)
if algorithm == "bgcs":
from brain.algorithm.tuning.lamcts import LamctsOptim
optimizer = LamctsOptim(name, iteration, parameters, baseline,rule_list)
optimizer.getDataHead()
for i in range(iteration):
_FEEDBACK = {}
_FEEDBACK['total'] = np.array(TEST_FEEDBACK['total'] ) * (1 + random.random())
optimizer.acquire()
optimizer.feedback(iteration = i, bench_score = _FEEDBACK)
optimizer.msg()
optimizer.best()
def testTuningProc(name, iteration, parameters, baseline, rule_list):
    """Drive a TuningProcess over its command/result pipes, then kill it.

    Uses the "random" algorithm; each round sends an "acquire" command,
    feeds back the canned TEST_FEEDBACK score, and finally asks for the
    best configuration before tearing the child process down.
    """
    from brain.controller.process import TuningProcess

    proc = TuningProcess(name, "random", iteration, parameters, baseline, rule_list)
    proc.start()
    # Block until the child signals that it is ready.
    proc.out_q[1].recv()

    for round_index in range(iteration):
        proc.cmd_q[0].send("acquire")
        _ = proc.out_q[1].recv()
        proc.input_q[0].send((round_index, TEST_FEEDBACK))
        proc.cmd_q[0].send("feedback")
        _ = proc.out_q[1].recv()

    proc.cmd_q[0].send("best")
    _ = proc.out_q[1].recv()
    proc.teardown()
    # Make sure the child is really gone even if teardown left it alive.
    os.kill(proc.pid, signal.SIGKILL)
def testSensi(trials, explainer):
    """Run the sensitization pipeline over previously collected tuning data.

    Args:
        trials: number of sensitization trials to run.
        explainer: name of the explainer backend (e.g. "LASSO", "SHAPKernel").
    """
    from brain.algorithm.sensitize.sensitize import sensitize

    sensitize(
        data_name="test_sensi",
        trials=trials,
        explainer=explainer,
        epoch=TEST_EPOCH,
        topN=TEST_TOPN,
        threshold=TEST_THRESHOLD,
    )
def testSensiProc():
    """Spawn a SensitizeProcess child and wait for it to finish.

    Relies on the "test_sensi" data prepared by an earlier testTuning run;
    results are reported back to the local response endpoint.
    """
    from brain.controller.process import SensitizeProcess

    sensi_proc = SensitizeProcess(
        trials=1,
        data_name="test_sensi",
        explainer="LASSO",
        response_ip="127.0.0.1",
        response_port="8888",
    )
    sensi_proc.start()
    sensi_proc.join()
def testOther():
    """Exercise the dataset helpers: create data, list it, then delete it."""
    from brain.common.dataset import listData, deleteFile

    # Produce a fresh "test_other" dataset via a short random-tuning run.
    testTuning("test_other", "random", 10, TEST_KNOBS, TEST_BASELINE, TEST_RULELIST)
    print(listData())
    deleteFile("test_other")
if __name__ == "__main__":
    # Tuning algorithms, run in-process.
    for session, algo in [
        ("test_random", "random"),
        ("test_hord", "hord"),
        ("test_lamcts", "lamcts"),
        ("test_bgcs", "bgcs"),
    ]:
        testTuning(session, algo, 30, TEST_KNOBS, TEST_BASELINE, TEST_RULELIST)

    # Tuning driven through a child process.
    testTuningProc("test_random", 10, TEST_KNOBS, TEST_BASELINE, TEST_RULELIST)

    # Prepare the "test_sensi" dataset, then run every explainer backend on it.
    testTuning("test_sensi", "random", 10, TEST_KNOBS, TEST_BASELINE, TEST_RULELIST)
    for explainer in ("Xsen", "SHAPKernel", "XGBTotalGain", "LASSO", "MI", "GP"):
        testSensi(3, explainer)

    # Sensitization through a child process.
    testSensiProc()

    # Remaining dataset helpers.
    testOther()
Loading...
马建仓 AI 助手
尝试更多
代码解读
代码找茬
代码优化
1
https://gitee.com/dingpengs/keentune_brain.git
[email protected]:dingpengs/keentune_brain.git
dingpengs
keentune_brain
keentune_brain
master

搜索帮助