From 002ec14f84d421b247b9d6ee67f14b4080600a37 Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Tue, 3 Nov 2020 20:57:10 +0200 Subject: [PATCH 01/13] Remove the difference between repos and schedules --- backend/actions/actions.py | 120 ++++++++++------------ backend/actions/yaml_validation.py | 16 +-- backend/apis/results.py | 98 +++++------------- backend/apis/trigger.py | 44 ++++---- backend/models/initial_config.py | 4 +- backend/models/{trigger_run.py => run.py} | 10 +- backend/models/run_config.py | 4 +- backend/models/schedule_info.py | 9 +- backend/models/scheduler_run.py | 19 ---- backend/utils/reporter.py | 100 +++++++----------- 10 files changed, 162 insertions(+), 262 deletions(-) rename backend/models/{trigger_run.py => run.py} (76%) delete mode 100644 backend/models/scheduler_run.py diff --git a/backend/actions/actions.py b/backend/actions/actions.py index cf45e86d..20fdb36e 100644 --- a/backend/actions/actions.py +++ b/backend/actions/actions.py @@ -13,8 +13,7 @@ from kubernetes.client import V1EnvVar from models.initial_config import InitialConfig from models.run_config import RunConfig -from models.scheduler_run import SchedulerRun -from models.trigger_run import TriggerModel, TriggerRun +from models.run import Run from packages.vcs.vcs import VCSFactory from utils.reporter import Reporter from utils.utils import Utils @@ -37,7 +36,7 @@ class Actions(Validator): _REPOS_DIR = "/zeroci/code/vcs_repos" _BIN_DIR = "/zeroci/bin/" run_id = None - model_obj = None + run_obj = None def test_run(self, job): """Runs tests and store the result in DB. @@ -67,8 +66,8 @@ def normal_run(self, job_name, line): os.remove(file_path) name = "{job_name}: {test_name}".format(job_name=job_name, test_name=line["name"]) - self.model_obj.result.append({"type": type, "status": status, "name": name, "content": result}) - self.model_obj.save() + self.run_obj.result.append({"type": type, "status": status, "name": name, "content": result}) + self.run_obj.save() if response.returncode in [137, 124]: return False return True @@ -84,8 +83,8 @@ def neph_run(self, job_name, line): status = FAILURE name = "{job_name}:{test_name}".format(job_name=job_name, test_name=line["name"]) - self.model_obj.result.append({"type": LOG_TYPE, "status": status, "name": name, "content": response.stdout}) - self.model_obj.save() + self.run_obj.result.append({"type": LOG_TYPE, "status": status, "name": name, "content": response.stdout}) + self.run_obj.save() for key in r.keys(): key = key.decode() @@ -99,8 +98,8 @@ def neph_run(self, job_name, line): status = FAILURE all_logs += log["content"] name = key.split(f"neph:{self.run_id}:")[-1] - self.model_obj.result.append({"type": LOG_TYPE, "status": status, "name": name, "content": all_logs}) - self.model_obj.save() + self.run_obj.result.append({"type": LOG_TYPE, "status": status, "name": name, "content": all_logs}) + self.run_obj.save() if response.returncode in [137, 124]: return False @@ -133,8 +132,8 @@ def build(self, job, clone_details, job_number): r.rpush(self.run_id, result) if not installed: - self.model_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": name, "content": result}) - self.model_obj.save() + self.run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": name, "content": result}) + self.run_obj.save() return deployed, installed @@ -142,19 +141,16 @@ def cal_status(self): """Calculate the status of the whole tests result has been stored on the BD's id. 
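         The per-job results stored on the run object are reduced to one status and saved back to the DB.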
""" status = SUCCESS - for result in self.model_obj.result: + for result in self.run_obj.result: if result["status"] != SUCCESS: status = result["status"] - self.model_obj.status = status - self.model_obj.save() + self.run_obj.status = status + self.run_obj.save() def _get_run_env(self): """Get run environment variables. """ - if isinstance(self.model_obj, TriggerModel): - name = self.model_obj.repo - else: - name = self.model_obj.schedule_name + name = self.run_obj.repo run_config = RunConfig(name=name) run_env = run_config.env env = [] @@ -164,8 +160,8 @@ def _get_run_env(self): return env def _load_yaml(self): - vcs_obj = VCSFactory().get_cvn(repo=self.model_obj.repo) - script = vcs_obj.get_content(ref=self.model_obj.commit, file_path="zeroCI.yaml") + vcs_obj = VCSFactory().get_cvn(repo=self.run_obj.repo) + script = vcs_obj.get_content(ref=self.run_obj.commit, file_path="zeroCI.yaml") if script: try: return yaml.safe_load(script) @@ -175,37 +171,32 @@ def _load_yaml(self): msg = "zeroCI.yaml is not found on the repository's home" r.rpush(self.run_id, msg) - self.model_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) - self.model_obj.save() + self.run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) + self.run_obj.save() return False def repo_clone_details(self): """Clone repo. """ configs = InitialConfig() - repo_remote_path = os.path.join(self._REPOS_DIR, self.model_obj.repo) - clone_url = urljoin(configs.vcs_host, f"{self.model_obj.repo}.git") + repo_remote_path = os.path.join(self._REPOS_DIR, self.run_obj.repo) + clone_url = urljoin(configs.vcs_host, f"{self.run_obj.repo}.git") cmd = """git clone {clone_url} {repo_remote_path} --branch {branch} cd {repo_remote_path} git reset --hard {commit} """.format( clone_url=clone_url, repo_remote_path=repo_remote_path, - branch=self.model_obj.branch, - commit=self.model_obj.commit, + branch=self.run_obj.branch, + commit=self.run_obj.commit, ) clone_details = {"cmd": cmd, "remote_path": repo_remote_path} return clone_details def _prepare_bin_dirs(self, bin_remote_path): self.bin_name = bin_remote_path.split(os.path.sep)[-1] - if isinstance(self.model_obj, TriggerModel): - release = self.model_obj.commit[:7] - local_path = os.path.join(self._BIN_DIR, self.model_obj.repo, self.model_obj.branch) - else: - release = str(datetime.fromtimestamp(self.model_obj.timestamp)).replace(" ", "_")[:16] - local_path = os.path.join(self._BIN_DIR, self.model_obj.schedule_name) - + release = self.run_obj.commit[:7] + local_path = os.path.join(self._BIN_DIR, self.run_obj.repo, self.run_obj.branch) bin_release = f"{self.bin_name}_{release}" bin_local_path = os.path.join(local_path, bin_release) if not os.path.exists(local_path): @@ -223,17 +214,17 @@ def _get_bin(self, bin_remote_path, job_number): container.ssh_get_remote_file(remote_path=bin_tmp_path, local_path=bin_local_path) if os.path.exists(bin_local_path): - self.model_obj.bin_release = bin_release - self.model_obj.save() + self.run_obj.bin_release = bin_release + self.run_obj.save() def _set_bin(self): - if self.model_obj.bin_release: + if self.run_obj.bin_release: bin_local_path = self._prepare_bin_dirs(self.bin_name) bin_remote_path = os.path.join(self._BIN_DIR, self.bin_name) container.ssh_set_remote_file(remote_path=bin_remote_path, local_path=bin_local_path) container.ssh_command(f"chmod +x {bin_remote_path}") - def build_and_test(self, id, schedule_name=None, script=None): + def build_and_test(self, id): """Builds, 
runs tests, calculates status and gives report on telegram and your version control system. :param id: DB's id of this run details. @@ -242,13 +233,12 @@ def build_and_test(self, id, schedule_name=None, script=None): :param schedule_name: str """ self.run_id = id - if not schedule_name: - self.model_obj = TriggerRun.get(id=self.run_id) - script = self._load_yaml() - else: - self.model_obj = SchedulerRun.get(id=self.run_id) + + self.run_obj = TriggerRun.get(id=self.run_id) + script = self._load_yaml() + if script: - valid = self.validate_yaml(run_id=self.run_id, model_obj=self.model_obj, script=script) + valid = self.validate_yaml(run_id=self.run_id, run_obj=self.run_obj, script=script) if valid: clone_details = self.repo_clone_details() worked = deployed = installed = True @@ -273,27 +263,27 @@ def build_and_test(self, id, schedule_name=None, script=None): container.delete() r.rpush(self.run_id, "hamada ok") self.cal_status() - reporter.report(run_id=self.run_id, model_obj=self.model_obj, schedule_name=schedule_name) + reporter.report(run_id=self.run_id, run_obj=self.run_obj) - def schedule_run(self, job): - """Builds, runs tests, calculates status and gives report on telegram. + # def schedule_run(self, job): + # """Builds, runs tests, calculates status and gives report on telegram. - :param schedule_name: the name of the scheduled run. - :type schedule_name: str - :param script: the script that should run your schedule. - :type script: str - """ - triggered_by = job.get("triggered_by", "ZeroCI Scheduler") - data = { - "status": PENDING, - "timestamp": int(datetime.now().timestamp()), - "schedule_name": job["schedule_name"], - "triggered_by": triggered_by, - "bin_release": None, - } - scheduler_run = SchedulerRun(**data) - scheduler_run.save() - id = str(scheduler_run.id) - data["id"] = id - r.publish("zeroci_status", json.dumps(data)) - self.build_and_test(id=id, schedule_name=job["schedule_name"], script=job) + # :param schedule_name: the name of the scheduled run. + # :type schedule_name: str + # :param script: the script that should run your schedule. 
+ # :type script: str + # """ + # triggered_by = job.get("triggered_by", "ZeroCI Scheduler") + # data = { + # "status": PENDING, + # "timestamp": int(datetime.now().timestamp()), + # "schedule_name": job["schedule_name"], + # "triggered_by": triggered_by, + # "bin_release": None, + # } + # scheduler_run = SchedulerRun(**data) + # scheduler_run.save() + # id = str(scheduler_run.id) + # data["id"] = id + # r.publish("zeroci_status", json.dumps(data)) + # self.build_and_test(id=id, schedule_name=job["schedule_name"], script=job) diff --git a/backend/actions/yaml_validation.py b/backend/actions/yaml_validation.py index 7d94495f..4387b032 100644 --- a/backend/actions/yaml_validation.py +++ b/backend/actions/yaml_validation.py @@ -102,12 +102,6 @@ def _validate_job_name(self, name): return msg - def _report(self, run_id, model_obj, msg): - msg = f"{msg} (see examples: https://github.com/threefoldtech/zeroCI/tree/development/docs/config)" - redis.rpush(run_id, msg) - model_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) - model_obj.save() - def _validate_job(self, job): job_name = job.get("name") msg = self._validate_job_name(job_name) @@ -133,7 +127,13 @@ def _validate_job(self, job): msg = self._validate_prerequisites(prerequisites) return msg - def validate_yaml(self, run_id, model_obj, script): + def _report(self, run_id, run_obj, msg): + msg = f"{msg} (see examples: https://github.com/threefoldtech/zeroCI/tree/development/docs/config)" + redis.rpush(run_id, msg) + run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) + run_obj.save() + + def validate_yaml(self, run_id, run_obj, script): jobs = script.get("jobs") if not jobs: msg = "jobs should be in yaml and shouldn't be empty" @@ -149,6 +149,6 @@ def validate_yaml(self, run_id, model_obj, script): if msg: break if msg: - self._report(run_id=run_id, model_obj=model_obj, msg=msg) + self._report(run_id=run_id, run_obj=run_obj, msg=msg) return False return True diff --git a/backend/apis/results.py b/backend/apis/results.py index 3b551196..9483c2fb 100644 --- a/backend/apis/results.py +++ b/backend/apis/results.py @@ -3,9 +3,7 @@ from apis.base import app, check_configs from bottle import abort, redirect, request, static_file from models.initial_config import InitialConfig -from models.schedule_info import ScheduleInfo -from models.scheduler_run import SchedulerRun -from models.trigger_run import TriggerRun +from models.run import Run from packages.vcs.vcs import VCSFactory @@ -21,16 +19,13 @@ def home(): """Return repos and schedules which are running on the server. """ configs = InitialConfig() - result = {"repos": [], "schedules": []} - result["repos"] = configs.repos - result["schedules"] = ScheduleInfo.distinct("name") - result_json = json.dumps(result) - return result_json + result = {"repos": configs.repos} + return json.dumps(result) @app.route("/api/repos/") @check_configs -def branch(repo): +def result(repo): """Returns tests ran on this repo with specific branch or test details if id is sent. 
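     Without an id it lists the runs for the given branch; with no branch it returns the repo's existing and deleted branches.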
:param repo: repo's name @@ -41,46 +36,22 @@ def branch(repo): id = request.query.get("id") if id: - trigger_run = TriggerRun.get(id=id) - live = True if trigger_run.status == PENDING else False - result = json.dumps({"live": live, "result": trigger_run.result}) - return result + run = Run.get(id=id) + live = True if run.status == PENDING else False + return json.dumps({"live": live, "result": run.result}) + if branch: fields = ["status", "commit", "committer", "timestamp", "bin_release", "triggered_by"] where = {"repo": repo, "branch": branch} - trigger_runs = TriggerRun.get_objects(fields=fields, order_by="timestamp", asc=False, **where) - result = json.dumps(trigger_runs) - return result + runs = Run.get_objects(fields=fields, order_by="timestamp", asc=False, **where) + return json.dumps(runs) vcs_obj = VCSFactory().get_cvn(repo=repo) exist_branches = vcs_obj.get_branches() - all_branches = TriggerRun.distinct(field="branch", repo=repo) + all_branches = Run.distinct(field="branch", repo=repo) deleted_branches = list(set(all_branches) - set(exist_branches)) branches = {"exist": exist_branches, "deleted": deleted_branches} - result = json.dumps(branches) - return result - - -@app.route("/api/schedules/") -@check_configs -def schedules(schedule): - """Returns tests ran on this schedule or test details if id is sent. - - :param schedule: schedule's name - :param id: DB id of test details. - """ - id = request.query.get("id") - if id: - scheduler_run = SchedulerRun.get(id=id) - live = True if scheduler_run.status == PENDING else False - result = json.dumps({"live": live, "result": scheduler_run.result}) - return result - - fields = ["status", "timestamp", "bin_release", "triggered_by"] - where = {"schedule_name": schedule} - scheduler_runs = SchedulerRun.get_objects(fields=fields, order_by="timestamp", asc=False, **where) - result = json.dumps(scheduler_runs) - return result + return json.dumps(branches) @app.route("/status") @@ -88,39 +59,24 @@ def schedules(schedule): def status(): """Returns repo's branch or schedule status for your version control system. 
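     Serves an SVG badge for the latest finished run, or redirects to that run's result page when ?result is set.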
""" - schedule = request.query.get("schedule") repo = request.query.get("repo") branch = request.query.get("branch") result = request.query.get("result") # to return the run result fields = ["status"] configs = InitialConfig() - if schedule: - where = {"schedule_name": schedule, "status": f"{ERROR} OR {FAILURE} OR {SUCCESS}"} - scheduler_run = SchedulerRun.get_objects(fields=fields, order_by="timestamp", asc=False, **where) - if len(scheduler_run) == 0: - return abort(404) - - if result: - link = f"{configs.domain}/schedules/{schedule}?id={str(scheduler_run[0]['id'])}" - return redirect(link) - if scheduler_run[0]["status"] == SUCCESS: - return static_file("svgs/build_passing.svg", mimetype="image/svg+xml", root=".") - else: - return static_file("svgs/build_failing.svg", mimetype="image/svg+xml", root=".") - - elif repo: - if not branch: - branch = "master" - where = {"repo": repo, "branch": branch, "status": f"{ERROR} OR {FAILURE} OR {SUCCESS}"} - trigger_run = TriggerRun.get_objects(fields=fields, order_by="timestamp", asc=False, **where) - if len(trigger_run) == 0: - return abort(404) - if result: - link = f"{configs.domain}/repos/{repo.replace('/', '%2F')}/{branch}/{str(trigger_run[0]['id'])}" - return redirect(link) - if trigger_run[0]["status"] == SUCCESS: - return static_file("svgs/build_passing.svg", mimetype="image/svg+xml", root=".") - else: - return static_file("svgs/build_failing.svg", mimetype="image/svg+xml", root=".") - return abort(404) + if not repo: + return abort(400, "repo is missing") + if not branch: + return abort(400, "branch is missing") + where = {"repo": repo, "branch": branch, "status": f"{ERROR} OR {FAILURE} OR {SUCCESS}"} + run = Run.get_objects(fields=fields, order_by="timestamp", asc=False, **where) + if len(run) == 0: + return abort(404) + if result: + link = f"{configs.domain}/repos/{repo.replace('/', '%2F')}/{branch}/{str(run[0]['id'])}" + return redirect(link) + if run[0]["status"] == SUCCESS: + return static_file("svgs/build_passing.svg", mimetype="image/svg+xml", root=".") + else: + return static_file("svgs/build_failing.svg", mimetype="image/svg+xml", root=".") diff --git a/backend/apis/trigger.py b/backend/apis/trigger.py index 4aa46cfb..428b142d 100644 --- a/backend/apis/trigger.py +++ b/backend/apis/trigger.py @@ -9,7 +9,7 @@ from apis.base import app, check_configs, user from bottle import HTTPResponse, redirect, request from models.initial_config import InitialConfig -from models.trigger_run import TriggerRun +from models.run import Run from packages.vcs.vcs import VCSFactory BIN_DIR = "/zeroci/bin/" @@ -26,29 +26,29 @@ def trigger(repo="", branch="", commit="", committer="", id=None, triggered=True timestamp = datetime.now().timestamp() if id: # Triggered from id. 
- trigger_run = TriggerRun.get(id=id) + run = Run.get(id=id) triggered_by = request.environ.get("beaker.session").get("username").strip(".3bot") data = { "timestamp": timestamp, - "commit": trigger_run.commit, - "committer": trigger_run.committer, + "commit": run.commit, + "committer": run.committer, "status": status, - "repo": trigger_run.repo, - "branch": trigger_run.branch, + "repo": run.repo, + "branch": run.branch, "triggered_by": triggered_by, "bin_release": None, "id": id, } - trigger_run.timestamp = int(timestamp) - trigger_run.status = status - trigger_run.result = [] - trigger_run.triggered_by = triggered_by - if trigger_run.bin_release: - bin_path = os.path.join(BIN_DIR, trigger_run.repo, trigger_run.branch, trigger_run.bin_release) + run.timestamp = int(timestamp) + run.status = status + run.result = [] + run.triggered_by = triggered_by + if run.bin_release: + bin_path = os.path.join(BIN_DIR, run.repo, run.branch, run.bin_release) if os.path.exists(bin_path): os.remove(bin_path) - trigger_run.bin_release = None - trigger_run.save() + run.bin_release = None + run.save() for key in redis.keys(): if id in key.decode(): redis.delete(key) @@ -69,15 +69,15 @@ def trigger(repo="", branch="", commit="", committer="", id=None, triggered=True "triggered_by": triggered_by, "bin_release": None, } - trigger_run = TriggerRun(**data) - trigger_run.save() - id = str(trigger_run.id) + run = Run(**data) + run.save() + id = str(run.id) data["id"] = id redis.publish("zeroci_status", json.dumps(data)) if id: - link = f"{configs.domain}/repos/{trigger_run.repo}/{trigger_run.branch}/{str(trigger_run.id)}" - vcs_obj = VCSFactory().get_cvn(repo=trigger_run.repo) - vcs_obj.status_send(status=status, link=link, commit=trigger_run.commit) + link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.id)}" + vcs_obj = VCSFactory().get_cvn(repo=run.repo) + vcs_obj.status_send(status=status, link=link, commit=run.commit) job = q.enqueue_call(func=actions.build_and_test, args=(id,), result_ttl=5000, timeout=20000) return job return None @@ -119,7 +119,7 @@ def run_trigger(): if request.headers.get("Content-Type") == "application/json": id = request.json.get("id") if id: - run = TriggerRun.get(id=id) + run = Run.get(id=id) if run.status == PENDING: return HTTPResponse( f"There is a running job for this id {id}, please try again after this run finishes", 503 @@ -133,7 +133,7 @@ def run_trigger(): last_commit = vcs_obj.get_last_commit(branch=branch) committer = vcs_obj.get_committer(commit=last_commit) where = {"repo": repo, "branch": branch, "commit": last_commit, "status": PENDING} - run = TriggerRun.get_objects(fields=["status"], **where) + run = Run.get_objects(fields=["status"], **where) if run: return HTTPResponse( f"There is a running job from this commit {last_commit}, please try again after this run finishes", 503 diff --git a/backend/models/initial_config.py b/backend/models/initial_config.py index c6ea3626..26116036 100644 --- a/backend/models/initial_config.py +++ b/backend/models/initial_config.py @@ -27,6 +27,6 @@ def _get_vcs_type(self, vcs_host): class InitialConfig(ModelFactory): _model = StoredFactory(InitialConfigModel) - def __new__(self, **kwargs): + def __new__(cls, **kwargs): name = "Initial_config" - return self._model.get(name=name, **kwargs) + return cls._model.get(name=name, **kwargs) diff --git a/backend/models/trigger_run.py b/backend/models/run.py similarity index 76% rename from backend/models/trigger_run.py rename to backend/models/run.py index aa42ed80..2c205598 100644 --- 
a/backend/models/trigger_run.py +++ b/backend/models/run.py @@ -1,7 +1,7 @@ from .base import Document, ModelFactory, fields, StoredFactory -class TriggerModel(Document): +class RunModel(Document): timestamp = fields.Integer(required=True, indexed=True) repo = fields.String(required=True) branch = fields.String(required=True) @@ -13,10 +13,10 @@ class TriggerModel(Document): result = fields.List(field=fields.Typed(dict)) -class TriggerRun(ModelFactory): - _model = StoredFactory(TriggerModel) +class Run(ModelFactory): + _model = StoredFactory(RunModel) - def __new__(self, **kwargs): + def __new__(cls, **kwargs): name = "model" + str(int(kwargs["timestamp"] * 10 ** 6)) kwargs["timestamp"] = int(kwargs["timestamp"]) - return self._model.new(name=name, **kwargs) + return cls._model.new(name=name, **kwargs) diff --git a/backend/models/run_config.py b/backend/models/run_config.py index e3c8f3c9..c40830dd 100644 --- a/backend/models/run_config.py +++ b/backend/models/run_config.py @@ -8,6 +8,6 @@ class RunConfigModel(Document): class RunConfig(ModelFactory): _model = StoredFactory(RunConfigModel) - def __new__(self, **kwargs): + def __new__(cls, **kwargs): kwargs["name"] = kwargs["name"].replace("/", "_") - return self._model.get(**kwargs) + return cls._model.get(**kwargs) diff --git a/backend/models/schedule_info.py b/backend/models/schedule_info.py index 54499146..09d4a3a9 100644 --- a/backend/models/schedule_info.py +++ b/backend/models/schedule_info.py @@ -2,17 +2,14 @@ class ScheduleInfoModel(Document): - prerequisites = fields.Typed(dict) - install = fields.String(required=True) - script = fields.List(field=fields.Typed(dict)) run_time = fields.String(required=True) - bin_path = fields.String() + branch = fields.String(required=True) created_by = fields.String(required=True) class ScheduleInfo(ModelFactory): _model = StoredFactory(ScheduleInfoModel) - def __new__(self, **kwargs): + def __new__(cls, **kwargs): name = kwargs["schedule_name"] - return self._model.new(name=name, **kwargs) + return cls._model.new(name=name, **kwargs) diff --git a/backend/models/scheduler_run.py b/backend/models/scheduler_run.py deleted file mode 100644 index 00ad8302..00000000 --- a/backend/models/scheduler_run.py +++ /dev/null @@ -1,19 +0,0 @@ -from .base import Document, ModelFactory, fields, StoredFactory - - -class ScheduleModel(Document): - timestamp = fields.Integer(required=True, indexed=True) - schedule_name = fields.String(required=True) - status = fields.String(required=True) - bin_release = fields.String() - triggered_by = fields.String(default="ZeroCI Scheduler") - result = fields.List(field=fields.Typed(dict)) - - -class SchedulerRun(ModelFactory): - _model = StoredFactory(ScheduleModel) - - def __new__(self, **kwargs): - name = "model" + str(int(kwargs["timestamp"] * 10 ** 6)) - kwargs["timestamp"] = int(kwargs["timestamp"]) - return self._model.new(name=name, **kwargs) diff --git a/backend/utils/reporter.py b/backend/utils/reporter.py index 4b2740a6..feadc065 100644 --- a/backend/utils/reporter.py +++ b/backend/utils/reporter.py @@ -13,7 +13,7 @@ class Reporter: - def report(self, run_id, model_obj, schedule_name=None): + def report(self, run_id, run_obj): """Report the result to the commit status and Telegram chat. :param run_id: DB's run_id of this run details. 
@@ -25,71 +25,47 @@ def report(self, run_id, model_obj, schedule_name=None): """ configs = InitialConfig() telegram = Telegram() - bin_release = model_obj.bin_release - triggered_by = model_obj.triggered_by - msg = self.report_msg(status=model_obj.status, schedule_name=schedule_name) - if not schedule_name: - url = f"/repos/{model_obj.repo}/{model_obj.branch}/{model_obj.id}" - link = urljoin(configs.domain, url) - if bin_release: - bin_url = f"/bin/{model_obj.repo}/{model_obj.branch}/{bin_release}" - bin_link = urljoin(configs.domain, bin_url) - else: - bin_link = None - data = { - "timestamp": model_obj.timestamp, - "commit": model_obj.commit, - "committer": model_obj.committer, - "status": model_obj.status, - "repo": model_obj.repo, - "branch": model_obj.branch, - "bin_release": bin_release, - "triggered_by": triggered_by, - "id": run_id, - } - r.publish("zeroci_status", json.dumps(data)) - vcs_obj = VCSFactory().get_cvn(repo=model_obj.repo) - vcs_obj.status_send(status=model_obj.status, link=link, commit=model_obj.commit) - telegram.send_msg( - msg=msg, - link=link, - repo=model_obj.repo, - branch=model_obj.branch, - commit=model_obj.commit, - committer=model_obj.committer, - bin_link=bin_link, - triggered_by=triggered_by, - ) + bin_release = run_obj.bin_release + triggered_by = run_obj.triggered_by + msg = self.report_msg(status=run_obj.status) + url = f"/repos/{run_obj.repo}/{run_obj.branch}/{run_obj.id}" + link = urljoin(configs.domain, url) + if bin_release: + bin_url = f"/bin/{run_obj.repo}/{run_obj.branch}/{bin_release}" + bin_link = urljoin(configs.domain, bin_url) else: - unspaced_schedule = model_obj.schedule_name.replace(" ", "%20") - url = f"/schedules/{unspaced_schedule}/{model_obj.id}" - link = urljoin(configs.domain, url) - if bin_release: - bin_url = f"/bin/{unspaced_schedule}/{bin_release}" - bin_link = urljoin(configs.domain, bin_url) - else: - bin_link = None - data = { - "status": model_obj.status, - "timestamp": model_obj.timestamp, - "schedule_name": schedule_name, - "bin_release": bin_release, - "triggered_by": triggered_by, - "id": run_id, - } - r.publish("zeroci_status", json.dumps(data)) - telegram.send_msg(msg=msg, link=link, bin_link=bin_link, triggered_by=triggered_by) + bin_link = None + data = { + "timestamp": run_obj.timestamp, + "commit": run_obj.commit, + "committer": run_obj.committer, + "status": run_obj.status, + "repo": run_obj.repo, + "branch": run_obj.branch, + "bin_release": bin_release, + "triggered_by": triggered_by, + "id": run_id, + } + r.publish("zeroci_status", json.dumps(data)) + vcs_obj = VCSFactory().get_cvn(repo=run_obj.repo) + vcs_obj.status_send(status=run_obj.status, link=link, commit=run_obj.commit) + telegram.send_msg( + msg=msg, + link=link, + repo=run_obj.repo, + branch=run_obj.branch, + commit=run_obj.commit, + committer=run_obj.committer, + bin_link=bin_link, + triggered_by=triggered_by, + ) - def report_msg(self, status, schedule_name=None): - if schedule_name: - name = f"{schedule_name} tests" - else: - name = "Tests" + def report_msg(self, status): if status == SUCCESS: - msg = f"✅ {name} passed " + msg = f"✅ Run passed " elif status == FAILURE: - msg = f"❌ {name} failed " + msg = f"❌ Run failed " else: - msg = f"⛔️ {name} errored " + msg = f"⛔️ Run errored " return msg From 4b27b3bba57c3f9eec06fab538e85623c4cd04f8 Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Mon, 14 Dec 2020 18:17:25 +0200 Subject: [PATCH 02/13] WIP: load config in server instead of loading it in worker --- backend/{utils => actions}/reporter.py | 0 
backend/actions/{actions.py => runner.py} | 26 +---- backend/actions/trigger.py | 107 ++++++++++++++++++ .../{yaml_validation.py => validator.py} | 19 ++-- backend/apis/trigger.py | 99 ++++------------ backend/utils/constants.py | 0 6 files changed, 141 insertions(+), 110 deletions(-) rename backend/{utils => actions}/reporter.py (100%) rename backend/actions/{actions.py => runner.py} (92%) create mode 100644 backend/actions/trigger.py rename backend/actions/{yaml_validation.py => validator.py} (91%) create mode 100644 backend/utils/constants.py diff --git a/backend/utils/reporter.py b/backend/actions/reporter.py similarity index 100% rename from backend/utils/reporter.py rename to backend/actions/reporter.py diff --git a/backend/actions/actions.py b/backend/actions/runner.py similarity index 92% rename from backend/actions/actions.py rename to backend/actions/runner.py index 20fdb36e..785a2233 100644 --- a/backend/actions/actions.py +++ b/backend/actions/runner.py @@ -8,14 +8,14 @@ import requests import yaml -from actions.yaml_validation import Validator +from actions.validator import Validator from deployment.container import Container from kubernetes.client import V1EnvVar from models.initial_config import InitialConfig from models.run_config import RunConfig from models.run import Run from packages.vcs.vcs import VCSFactory -from utils.reporter import Reporter +from actions.reporter import Reporter from utils.utils import Utils container = Container() @@ -32,7 +32,7 @@ NEPH_TYPE = "neph" -class Actions(Validator): +class Runner: _REPOS_DIR = "/zeroci/code/vcs_repos" _BIN_DIR = "/zeroci/bin/" run_id = None @@ -159,22 +159,6 @@ def _get_run_env(self): env.append(env_var) return env - def _load_yaml(self): - vcs_obj = VCSFactory().get_cvn(repo=self.run_obj.repo) - script = vcs_obj.get_content(ref=self.run_obj.commit, file_path="zeroCI.yaml") - if script: - try: - return yaml.safe_load(script) - except: - msg = traceback.format_exc() - else: - msg = "zeroCI.yaml is not found on the repository's home" - - r.rpush(self.run_id, msg) - self.run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) - self.run_obj.save() - return False - def repo_clone_details(self): """Clone repo. 
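         Returns the clone command and the repo's remote path rather than cloning directly.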
""" @@ -205,7 +189,7 @@ def _prepare_bin_dirs(self, bin_remote_path): return bin_local_path def _get_bin(self, bin_remote_path, job_number): - if bin_remote_path and job_number == 0: + if bin_remote_path and not job_number: bin_local_path = self._prepare_bin_dirs(bin_remote_path) bin_release = bin_local_path.split(os.path.sep)[-1] bin_tmp_path = os.path.join(self._BIN_DIR, bin_release) @@ -234,7 +218,7 @@ def build_and_test(self, id): """ self.run_id = id - self.run_obj = TriggerRun.get(id=self.run_id) + self.run_obj = Run.get(id=self.run_id) script = self._load_yaml() if script: diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py new file mode 100644 index 00000000..f21ff7e4 --- /dev/null +++ b/backend/actions/trigger.py @@ -0,0 +1,107 @@ +import json +import os +import traceback +from datetime import datetime + +import yaml +from bottle import request +from models.initial_config import InitialConfig +from models.run import Run +from packages.vcs.vcs import VCSFactory +from redis import Redis +from rq import Queue + +from actions.runner import Runner + +redis = Redis() +q = Queue(connection=redis) +PENDING = "pending" + +ERROR = "error" +LOG_TYPE = "log" +BIN_DIR = "/zeroci/bin/" + + +class Trigger: + def _load_yaml(self, repo): + vcs_obj = VCSFactory().get_cvn(repo=repo) + script = vcs_obj.get_content(ref=self.run_obj.commit, file_path="zeroCI.yaml") + if script: + try: + return yaml.safe_load(script) + except: + msg = traceback.format_exc() + else: + msg = "zeroCI.yaml is not found on the repository's home" + + redis.rpush(self.run_id, msg) + self.run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) + self.run_obj.save() + return False + + def enqueue(self, repo="", branch="", commit="", committer="", id=None, triggered=True): + configs = InitialConfig() + status = PENDING + timestamp = datetime.now().timestamp() + yaml_config = self._load_yaml(repo) + if yaml_config: + valid = self.validate_yaml(yaml_config) + if valid: + pass #FIXME + if id: + # Triggered from id. + run = Run.get(id=id) + triggered_by = request.environ.get("beaker.session").get("username").strip(".3bot") + data = { + "timestamp": timestamp, + "commit": run.commit, + "committer": run.committer, + "status": status, + "repo": run.repo, + "branch": run.branch, + "triggered_by": triggered_by, + "bin_release": None, + "id": id, + } + run.timestamp = int(timestamp) + run.status = status + run.result = [] + run.triggered_by = triggered_by + if run.bin_release: + bin_path = os.path.join(BIN_DIR, run.repo, run.branch, run.bin_release) + if os.path.exists(bin_path): + os.remove(bin_path) + run.bin_release = None + run.save() + for key in redis.keys(): + if id in key.decode(): + redis.delete(key) + redis.publish("zeroci_status", json.dumps(data)) + else: + # Triggered from vcs webhook or rebuild using the button. 
+ if repo in configs.repos: + triggered_by = "VCS Hook" + if triggered: + triggered_by = request.environ.get("beaker.session").get("username").strip(".3bot") + data = { + "timestamp": timestamp, + "commit": commit, + "committer": committer, + "status": status, + "repo": repo, + "branch": branch, + "triggered_by": triggered_by, + "bin_release": None, + } + run = Run(**data) + run.save() + id = str(run.id) + data["id"] = id + redis.publish("zeroci_status", json.dumps(data)) + if id: + link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.id)}" + vcs_obj = VCSFactory().get_cvn(repo=run.repo) + vcs_obj.status_send(status=status, link=link, commit=run.commit) + job = q.enqueue_call(func=Runner.build_and_test, args=(id,), result_ttl=5000, timeout=20000) + return job + return None diff --git a/backend/actions/yaml_validation.py b/backend/actions/validator.py similarity index 91% rename from backend/actions/yaml_validation.py rename to backend/actions/validator.py index 4387b032..4c9ea670 100644 --- a/backend/actions/yaml_validation.py +++ b/backend/actions/validator.py @@ -127,14 +127,14 @@ def _validate_job(self, job): msg = self._validate_prerequisites(prerequisites) return msg - def _report(self, run_id, run_obj, msg): - msg = f"{msg} (see examples: https://github.com/threefoldtech/zeroCI/tree/development/docs/config)" - redis.rpush(run_id, msg) - run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) - run_obj.save() - - def validate_yaml(self, run_id, run_obj, script): - jobs = script.get("jobs") + # def _report(self, run_id, run_obj, msg): + # msg = f"{msg} (see examples: https://github.com/threefoldtech/zeroCI/tree/development/docs/config)" + # redis.rpush(run_id, msg) + # run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) + # run_obj.save() + + def validate_yaml(self, config): + jobs = config.get("jobs") if not jobs: msg = "jobs should be in yaml and shouldn't be empty" else: @@ -149,6 +149,5 @@ def validate_yaml(self, run_id, run_obj, script): if msg: break if msg: - self._report(run_id=run_id, run_obj=run_obj, msg=msg) - return False + return msg return True diff --git a/backend/apis/trigger.py b/backend/apis/trigger.py index 428b142d..1799d297 100644 --- a/backend/apis/trigger.py +++ b/backend/apis/trigger.py @@ -1,88 +1,17 @@ import json -import os -from datetime import datetime -from redis import Redis -from rq import Queue - -from actions.actions import Actions from apis.base import app, check_configs, user from bottle import HTTPResponse, redirect, request from models.initial_config import InitialConfig from models.run import Run from packages.vcs.vcs import VCSFactory +from actions.trigger import Trigger -BIN_DIR = "/zeroci/bin/" +trigger = Trigger() -redis = Redis() -actions = Actions() -q = Queue(connection=redis) PENDING = "pending" -def trigger(repo="", branch="", commit="", committer="", id=None, triggered=True): - configs = InitialConfig() - status = PENDING - timestamp = datetime.now().timestamp() - if id: - # Triggered from id. 
- run = Run.get(id=id) - triggered_by = request.environ.get("beaker.session").get("username").strip(".3bot") - data = { - "timestamp": timestamp, - "commit": run.commit, - "committer": run.committer, - "status": status, - "repo": run.repo, - "branch": run.branch, - "triggered_by": triggered_by, - "bin_release": None, - "id": id, - } - run.timestamp = int(timestamp) - run.status = status - run.result = [] - run.triggered_by = triggered_by - if run.bin_release: - bin_path = os.path.join(BIN_DIR, run.repo, run.branch, run.bin_release) - if os.path.exists(bin_path): - os.remove(bin_path) - run.bin_release = None - run.save() - for key in redis.keys(): - if id in key.decode(): - redis.delete(key) - redis.publish("zeroci_status", json.dumps(data)) - else: - # Triggered from vcs webhook or rebuild using the button. - if repo in configs.repos: - triggered_by = "VCS Hook" - if triggered: - triggered_by = request.environ.get("beaker.session").get("username").strip(".3bot") - data = { - "timestamp": timestamp, - "commit": commit, - "committer": committer, - "status": status, - "repo": repo, - "branch": branch, - "triggered_by": triggered_by, - "bin_release": None, - } - run = Run(**data) - run.save() - id = str(run.id) - data["id"] = id - redis.publish("zeroci_status", json.dumps(data)) - if id: - link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.id)}" - vcs_obj = VCSFactory().get_cvn(repo=run.repo) - vcs_obj.status_send(status=status, link=link, commit=run.commit) - job = q.enqueue_call(func=actions.build_and_test, args=(id,), result_ttl=5000, timeout=20000) - return job - return None - - @app.route("/git_trigger", method=["POST"]) @check_configs def git_trigger(): @@ -90,6 +19,7 @@ def git_trigger(): """ configs = InitialConfig() if request.headers.get("Content-Type") == "application/json": + job = "" # push case reference = request.json.get("ref") if reference: @@ -102,10 +32,21 @@ def git_trigger(): committer = request.json["pusher"]["login"] branch_exist = not commit.startswith("000000") if branch_exist: - job = trigger(repo=repo, branch=branch, commit=commit, committer=committer, triggered=False) - if job: - return HTTPResponse(job.get_id(), 200) - return HTTPResponse("Done", 200) + job = trigger.enqueue(repo=repo, branch=branch, commit=commit, committer=committer, triggered=False) + + # pull case + # TODO: Handle the request for gitea. 
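+            # The fields below follow GitHub's pull_request webhook payload (gitea handling is still a TODO, above).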
+ elif request.json.get("pull_request"): + if request.json.get("action") in ["opened", "synchronize"]: + repo = request.json["pull_request"]["head"]["repo"]["full_name"] + current_branch = request.json["pull_request"]["head"]["ref"] + target_branch = request.json["pull_request"]["base"]["ref"] + commit = request.json["pull_request"]["head"]["sha"] + committer = request.json["sender"]["login"] + job = trigger.enqueue(repo=repo, branch=current_branch, commit=commit, committer=committer, triggered=False) + if job: + return HTTPResponse(job.get_id(), 201) + return HTTPResponse("Nothing to be done", 200) return HTTPResponse("Wrong content type", 400) @@ -124,7 +65,7 @@ def run_trigger(): return HTTPResponse( f"There is a running job for this id {id}, please try again after this run finishes", 503 ) - job = trigger(id=id) + job = trigger.enqueue(id=id) return HTTPResponse(job.get_id(), 200) repo = request.json.get("repo") @@ -139,7 +80,7 @@ def run_trigger(): f"There is a running job from this commit {last_commit}, please try again after this run finishes", 503 ) if last_commit: - job = trigger(repo=repo, branch=branch, commit=last_commit, committer=committer) + job = trigger.enqueue(repo=repo, branch=branch, commit=last_commit, committer=committer) else: return HTTPResponse(f"Couldn't get last commit from this branch {branch}, please try again", 503) if job: diff --git a/backend/utils/constants.py b/backend/utils/constants.py new file mode 100644 index 00000000..e69de29b From 50dc4f6d44f17a09da5a82b081b75d90a04e7b67 Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Mon, 21 Dec 2020 12:13:39 +0200 Subject: [PATCH 03/13] Use run_id instead of id --- backend/actions/reporter.py | 4 +-- backend/actions/runner.py | 14 +++++------ backend/actions/trigger.py | 43 +++++++++++++++++--------------- backend/apis/results.py | 8 +++--- backend/apis/schedule.py | 2 +- backend/apis/trigger.py | 10 ++++---- backend/apis/websockets.py | 14 +++++------ backend/deployment/container.py | 24 +++++++++--------- backend/health/cleanup.py | 2 +- backend/health/health_recover.py | 4 +-- backend/models/base.py | 8 +++--- 11 files changed, 68 insertions(+), 65 deletions(-) diff --git a/backend/actions/reporter.py b/backend/actions/reporter.py index feadc065..f3c6c33f 100644 --- a/backend/actions/reporter.py +++ b/backend/actions/reporter.py @@ -28,7 +28,7 @@ def report(self, run_id, run_obj): bin_release = run_obj.bin_release triggered_by = run_obj.triggered_by msg = self.report_msg(status=run_obj.status) - url = f"/repos/{run_obj.repo}/{run_obj.branch}/{run_obj.id}" + url = f"/repos/{run_obj.repo}/{run_obj.branch}/{run_obj.run_id}" link = urljoin(configs.domain, url) if bin_release: bin_url = f"/bin/{run_obj.repo}/{run_obj.branch}/{bin_release}" @@ -44,7 +44,7 @@ def report(self, run_id, run_obj): "branch": run_obj.branch, "bin_release": bin_release, "triggered_by": triggered_by, - "id": run_id, + "run_id": run_id, } r.publish("zeroci_status", json.dumps(data)) vcs_obj = VCSFactory().get_cvn(repo=run_obj.repo) diff --git a/backend/actions/runner.py b/backend/actions/runner.py index 785a2233..0915954e 100644 --- a/backend/actions/runner.py +++ b/backend/actions/runner.py @@ -52,7 +52,7 @@ def test_run(self, job): def normal_run(self, job_name, line): status = SUCCESS - response, file_path = container.run_test(id=self.run_id, run_cmd=line["cmd"]) + response, file_path = container.run_test(run_id=self.run_id, run_cmd=line["cmd"]) result = response.stdout type = LOG_TYPE if response.returncode: @@ -78,7 +78,7 @@ def 
neph_run(self, job_name, line): yaml_path = line["yaml_path"] neph_id = f"{self.run_id}:{job_name}:{line['name']}" cmd = f"export NEPH_RUN_ID='{neph_id}' \n cd {working_dir} \n /zeroci/bin/neph -y {yaml_path} -m CI" - response = container.execute_command(cmd=cmd, id=self.run_id) + response = container.execute_command(cmd=cmd, run_id=self.run_id) if response.returncode: status = FAILURE @@ -120,7 +120,7 @@ def build(self, job, clone_details, job_number): result = response.stdout r.rpush(self.run_id, result) else: - response = container.execute_command(cmd=job["install"], id=self.run_id) + response = container.execute_command(cmd=job["install"], run_id=self.run_id) if response.returncode: name = "{job_name}: Installation".format(job_name=job["name"]) result = response.stdout @@ -194,7 +194,7 @@ def _get_bin(self, bin_remote_path, job_number): bin_release = bin_local_path.split(os.path.sep)[-1] bin_tmp_path = os.path.join(self._BIN_DIR, bin_release) cmd = f"cp {bin_remote_path} {bin_tmp_path}" - container.execute_command(cmd=cmd, id="", verbose=False) + container.execute_command(cmd=cmd, run_id="", verbose=False) container.ssh_get_remote_file(remote_path=bin_tmp_path, local_path=bin_local_path) if os.path.exists(bin_local_path): @@ -208,7 +208,7 @@ def _set_bin(self): container.ssh_set_remote_file(remote_path=bin_remote_path, local_path=bin_local_path) container.ssh_command(f"chmod +x {bin_remote_path}") - def build_and_test(self, id): + def build_and_test(self, run_id): """Builds, runs tests, calculates status and gives report on telegram and your version control system. :param id: DB's id of this run details. @@ -216,9 +216,9 @@ def build_and_test(self, id): :param schedule_name: it will have a value if the run is scheduled. :param schedule_name: str """ - self.run_id = id + self.run_id = run_id - self.run_obj = Run.get(id=self.run_id) + self.run_obj = Run.get(run_id=self.run_id) script = self._load_yaml() if script: diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py index f21ff7e4..3ab6f6e6 100644 --- a/backend/actions/trigger.py +++ b/backend/actions/trigger.py @@ -10,6 +10,7 @@ from packages.vcs.vcs import VCSFactory from redis import Redis from rq import Queue +from models.run import Run from actions.runner import Runner @@ -23,9 +24,9 @@ class Trigger: - def _load_yaml(self, repo): + def _load_config(self, run_id, repo, commit): vcs_obj = VCSFactory().get_cvn(repo=repo) - script = vcs_obj.get_content(ref=self.run_obj.commit, file_path="zeroCI.yaml") + script = vcs_obj.get_content(ref=commit, file_path="zeroCI.yaml") if script: try: return yaml.safe_load(script) @@ -34,23 +35,19 @@ def _load_yaml(self, repo): else: msg = "zeroCI.yaml is not found on the repository's home" - redis.rpush(self.run_id, msg) - self.run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) - self.run_obj.save() + redis.rpush(run_id, msg) + run_obj = Run.get(run_id=run_id) + run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) + run_obj.save() return False - def enqueue(self, repo="", branch="", commit="", committer="", id=None, triggered=True): + def enqueue(self, repo="", branch="", commit="", committer="", run_id=None, triggered=True): configs = InitialConfig() status = PENDING timestamp = datetime.now().timestamp() - yaml_config = self._load_yaml(repo) - if yaml_config: - valid = self.validate_yaml(yaml_config) - if valid: - pass #FIXME - if id: + if run_id: # Triggered from id. 
- run = Run.get(id=id) + run = Run.get(run_id=run_id) triggered_by = request.environ.get("beaker.session").get("username").strip(".3bot") data = { "timestamp": timestamp, @@ -61,7 +58,7 @@ def enqueue(self, repo="", branch="", commit="", committer="", id=None, triggere "branch": run.branch, "triggered_by": triggered_by, "bin_release": None, - "id": id, + "run_id": run_id, } run.timestamp = int(timestamp) run.status = status @@ -74,7 +71,7 @@ def enqueue(self, repo="", branch="", commit="", committer="", id=None, triggere run.bin_release = None run.save() for key in redis.keys(): - if id in key.decode(): + if run_id in key.decode(): redis.delete(key) redis.publish("zeroci_status", json.dumps(data)) else: @@ -95,13 +92,19 @@ def enqueue(self, repo="", branch="", commit="", committer="", id=None, triggere } run = Run(**data) run.save() - id = str(run.id) - data["id"] = id + run_id = str(run.run_id) + data["run_id"] = run_id redis.publish("zeroci_status", json.dumps(data)) - if id: - link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.id)}" + if run_id: + yaml_config = self._load_config(run_id, repo, commit) + if yaml_config: + valid = self.validate_yaml(yaml_config) + if valid: + pass + + link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.run_id)}" vcs_obj = VCSFactory().get_cvn(repo=run.repo) vcs_obj.status_send(status=status, link=link, commit=run.commit) - job = q.enqueue_call(func=Runner.build_and_test, args=(id,), result_ttl=5000, timeout=20000) + job = q.enqueue_call(func=Runner.build_and_test, args=(run_id,), result_ttl=5000, timeout=20000) return job return None diff --git a/backend/apis/results.py b/backend/apis/results.py index 9483c2fb..41c927af 100644 --- a/backend/apis/results.py +++ b/backend/apis/results.py @@ -33,10 +33,10 @@ def result(repo): :param id: DB id of test details. 
""" branch = request.query.get("branch") - id = request.query.get("id") + run_id = request.query.get("id") - if id: - run = Run.get(id=id) + if run_id: + run = Run.get(run_id=id) live = True if run.status == PENDING else False return json.dumps({"live": live, "result": run.result}) @@ -74,7 +74,7 @@ def status(): if len(run) == 0: return abort(404) if result: - link = f"{configs.domain}/repos/{repo.replace('/', '%2F')}/{branch}/{str(run[0]['id'])}" + link = f"{configs.domain}/repos/{repo.replace('/', '%2F')}/{branch}/{str(run[0]['run_id'])}" return redirect(link) if run[0]["status"] == SUCCESS: return static_file("svgs/build_passing.svg", mimetype="image/svg+xml", root=".") diff --git a/backend/apis/schedule.py b/backend/apis/schedule.py index b7174ae6..ce5c1f21 100644 --- a/backend/apis/schedule.py +++ b/backend/apis/schedule.py @@ -65,7 +65,7 @@ def schedule(): cron_string=job["run_time"], func=actions.schedule_run, args=[job,], - id=job["schedule_name"], + run_id=job["schedule_name"], timeout=-1, ) except: diff --git a/backend/apis/trigger.py b/backend/apis/trigger.py index 1799d297..c3ff2ae5 100644 --- a/backend/apis/trigger.py +++ b/backend/apis/trigger.py @@ -58,14 +58,14 @@ def run_trigger(): redirect("/") if request.headers.get("Content-Type") == "application/json": - id = request.json.get("id") - if id: - run = Run.get(id=id) + run_id = request.json.get("id") + if run_id: + run = Run.get(run_id=run_id) if run.status == PENDING: return HTTPResponse( - f"There is a running job for this id {id}, please try again after this run finishes", 503 + f"There is a running job for this run_id {run_id}, please try again after this run finishes", 503 ) - job = trigger.enqueue(id=id) + job = trigger.enqueue(run_id=run_id) return HTTPResponse(job.get_id(), 200) repo = request.json.get("repo") diff --git a/backend/apis/websockets.py b/backend/apis/websockets.py index 899e05b2..2f5f915d 100644 --- a/backend/apis/websockets.py +++ b/backend/apis/websockets.py @@ -9,21 +9,21 @@ redis = Redis() -@app.route("/websocket/logs/") -def logs(id): +@app.route("/websocket/logs/") +def logs(run_id): wsock = request.environ.get("wsgi.websocket") if not wsock: abort(400, "Expected WebSocket request.") start = 0 while start != -1: - length = redis.llen(id) + length = redis.llen(run_id) if start > length: break if start == length: sleep(0.01) continue - result_list = redis.lrange(id, start, length) + result_list = redis.lrange(run_id, start, length) if b"hamada ok" in result_list: result_list.remove(b"hamada ok") start = -1 @@ -67,8 +67,8 @@ def neph_logs(neph_id): break -@app.route("/websocket/neph_jobs/") -def neph_jobs(id): +@app.route("/websocket/neph_jobs/") +def neph_jobs(job_id): wsock = request.environ.get("wsgi.websocket") if not wsock: abort(400, "Expected WebSocket request.") @@ -78,7 +78,7 @@ def neph_jobs(id): new_jobs = [] for key in redis.keys(): key = key.decode() - if key.startswith(f"neph:{id}"): + if key.startswith(f"neph:{job_id}"): if key not in jobs: jobs.append(key) key = key.replace(" ", "%20") diff --git a/backend/deployment/container.py b/backend/deployment/container.py index 72fdc559..2224cb11 100644 --- a/backend/deployment/container.py +++ b/backend/deployment/container.py @@ -90,12 +90,12 @@ def ssh_set_remote_file(self, remote_path, local_path, ip=None, port=22): except: return False - def redis_push(self, id, content, verbose=True): + def redis_push(self, run_id, content, verbose=True): if verbose: r = redis.Redis() - r.rpush(id, content) + r.rpush(run_id, content) - def 
execute_command(self, cmd, id, verbose=True): + def execute_command(self, cmd, run_id, verbose=True): """Execute a command on a remote machine using ssh. :param cmd: command to be executed on a remote machine. @@ -123,7 +123,7 @@ def execute_command(self, cmd, id, verbose=True): ) except: out += "Couldn't run on the testing container, container become unreachable" - self.redis_push(id, out, verbose=verbose) + self.redis_push(run_id, out, verbose=verbose) rc = 137 return Complete_Execution(rc, out) @@ -133,18 +133,18 @@ def execute_command(self, cmd, id, verbose=True): content = response.read_stdout(timeout=600) except: msg = "\nConnectionError: Couldn't execute cmd on the runner" - self.redis_push(id, msg, verbose=verbose) + self.redis_push(run_id, msg, verbose=verbose) out += msg rc = 124 break end = time.time() time_taken = end - start if content: - self.redis_push(id, content, verbose=verbose) + self.redis_push(run_id, content, verbose=verbose) out += content elif time_taken > 590: msg = "\nTimeout exceeded 10 mins with no output" - self.redis_push(id, msg, verbose=verbose) + self.redis_push(run_id, msg, verbose=verbose) out += msg rc = 124 response.close() @@ -154,13 +154,13 @@ def execute_command(self, cmd, id, verbose=True): rc = response.returncode if rc == 137: msg = "Runner expired (job takes more than 1 hour)" - self.redis_push(id, msg, verbose=verbose) + self.redis_push(run_id, msg, verbose=verbose) out += msg return Complete_Execution(rc, out) def get_remote_file(self, remote_path, local_path): - response = self.execute_command(f"cat {remote_path}", id="", verbose=False) + response = self.execute_command(f"cat {remote_path}", run_id="", verbose=False) if not response.returncode: self.write_file(text=response.stdout, file_path=local_path) return True @@ -278,7 +278,7 @@ def delete(self): except: pass - def run_test(self, run_cmd, id): + def run_test(self, run_cmd, run_id): """Run test command and get the result as xml file if the running command is following junit otherwise result will be log. :param run_cmd: test command to be run. 
@@ -289,14 +289,14 @@ def run_test(self, run_cmd, id): :type env: dict :return: path to xml file if exist and subprocess object containing (returncode, stdout, stderr) """ - response = self.execute_command(run_cmd, id=id) + response = self.execute_command(run_cmd, run_id=run_id) file_path = "/zeroci/xml/{}.xml".format(self.random_string()) remote_path = "/test.xml" copied = self.get_remote_file(remote_path=remote_path, local_path=file_path) if copied: file_path = file_path delete_cmd = f"rm -f {remote_path}" - self.execute_command(delete_cmd, id=id) + self.execute_command(delete_cmd, run_id=run_id) else: if os.path.exists(file_path): os.remove(file_path) diff --git a/backend/health/cleanup.py b/backend/health/cleanup.py index bb59023b..776a5643 100644 --- a/backend/health/cleanup.py +++ b/backend/health/cleanup.py @@ -26,7 +26,7 @@ def remove(factory, days=30): time_diff = now_time - run_time if time_diff.days > days: factory.delete(name) - r.delete(obj.id) + r.delete(obj.run_id) def get_size_in_giga_bytes(path): diff --git a/backend/health/health_recover.py b/backend/health/health_recover.py index 3da16df3..a69ed2f1 100644 --- a/backend/health/health_recover.py +++ b/backend/health/health_recover.py @@ -13,8 +13,8 @@ def redis(self): cmd = "redis-server /etc/redis/redis.conf" self.execute_cmd(cmd=cmd, timeout=TIMEOUT) - def worker(self, id): - cmd = f"/bin/bash -c 'cd {PATH}; python3 worker{id}.py &>> worker_{id}.log &'" + def worker(self, wid): + cmd = f"/bin/bash -c 'cd {PATH}; python3 worker{wid}.py &>> worker_{wid}.log &'" self.execute_cmd(cmd=cmd, timeout=TIMEOUT) def scheduler(self): diff --git a/backend/models/base.py b/backend/models/base.py index 1b89566d..d1e70eb7 100644 --- a/backend/models/base.py +++ b/backend/models/base.py @@ -3,7 +3,7 @@ class Document(Base): @property - def id(self): + def run_id(self): return self.instance_name.strip("model") @property @@ -15,8 +15,8 @@ class ModelFactory: _model = None @classmethod - def get(cls, id): - name = "model" + str(id) + def get(cls, run_id): + name = "model" + str(run_id) return cls._model.find(name) @classmethod @@ -67,7 +67,7 @@ def get_objects(cls, fields, order_by=None, asc=True, **kwargs): obj_dict = {} for field in fields: obj_dict[field] = getattr(obj, field) - obj_dict["id"] = obj.instance_name.strip("model") + obj_dict["run_id"] = obj.instance_name.strip("model") results.append(obj_dict) if order_by: From 42aa73cdd14cd8bd2be37fe4248db28e8ea5cac2 Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Mon, 21 Dec 2020 16:32:25 +0200 Subject: [PATCH 04/13] Read yaml file and validate it before spawn the worker --- backend/actions/runner.py | 50 +++++++++++------------- backend/actions/trigger.py | 73 +++++++++++++++++++++++++++--------- backend/actions/validator.py | 49 ++++++++++++++++++++++-- backend/apis/trigger.py | 9 +++-- docs/config/nosetests.yaml | 8 ++++ 5 files changed, 135 insertions(+), 54 deletions(-) diff --git a/backend/actions/runner.py b/backend/actions/runner.py index 0915954e..1655c48d 100644 --- a/backend/actions/runner.py +++ b/backend/actions/runner.py @@ -208,7 +208,7 @@ def _set_bin(self): container.ssh_set_remote_file(remote_path=bin_remote_path, local_path=bin_local_path) container.ssh_command(f"chmod +x {bin_remote_path}") - def build_and_test(self, run_id): + def build_and_test(self, run_id, repo_config): """Builds, runs tests, calculates status and gives report on telegram and your version control system. :param id: DB's id of this run details. 
@@ -217,34 +217,28 @@ def build_and_test(self, run_id): :param schedule_name: str """ self.run_id = run_id - self.run_obj = Run.get(run_id=self.run_id) - script = self._load_yaml() - - if script: - valid = self.validate_yaml(run_id=self.run_id, run_obj=self.run_obj, script=script) - if valid: - clone_details = self.repo_clone_details() - worked = deployed = installed = True - for i, job in enumerate(script["jobs"]): - if not (worked and deployed and installed): - break - log = """ - ****************************************************** - Starting {job_name} job - ****************************************************** - """.format( - job_name=job["name"] - ).replace( - " ", "" - ) - r.rpush(self.run_id, log) - deployed, installed = self.build(job=job, clone_details=clone_details, job_number=i) - if deployed: - if installed: - worked = self.test_run(job=job) - self._get_bin(bin_remote_path=job.get("bin_path"), job_number=i) - container.delete() + clone_details = self.repo_clone_details() + worked = deployed = installed = True + for i, job in enumerate(repo_config["jobs"]): + if not (worked and deployed and installed): + break + log = """ + ****************************************************** + Starting {job_name} job + ****************************************************** + """.format( + job_name=job["name"] + ).replace( + " ", "" + ) + r.rpush(self.run_id, log) + deployed, installed = self.build(job=job, clone_details=clone_details, job_number=i) + if deployed: + if installed: + worked = self.test_run(job=job) + self._get_bin(bin_remote_path=job.get("bin_path"), job_number=i) + container.delete() r.rpush(self.run_id, "hamada ok") self.cal_status() reporter.report(run_id=self.run_id, run_obj=self.run_obj) diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py index 3ab6f6e6..9b65986e 100644 --- a/backend/actions/trigger.py +++ b/backend/actions/trigger.py @@ -11,6 +11,7 @@ from redis import Redis from rq import Queue from models.run import Run +from actions.validator import Validator from actions.runner import Runner @@ -24,24 +25,54 @@ class Trigger: - def _load_config(self, run_id, repo, commit): + def _load_config(self, repo, commit): vcs_obj = VCSFactory().get_cvn(repo=repo) script = vcs_obj.get_content(ref=commit, file_path="zeroCI.yaml") if script: try: - return yaml.safe_load(script) + config = yaml.safe_load(script) + return True, config, "" except: msg = traceback.format_exc() else: msg = "zeroCI.yaml is not found on the repository's home" + + return False, "", msg - redis.rpush(run_id, msg) - run_obj = Run.get(run_id=run_id) - run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) - run_obj.save() - return False + def enqueue(self, repo="", branch="", target_branch="", commit="", committer="", run_id=None, triggered=False): + if run_id: + run = Run.get(run_id=run_id) + repo = run.repo + commit = run.commit + status, config, msg = self._load_config(repo, commit) + if not status: + run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered) + return self._report(msg, run, run_id) + validator = Validator() + valid, msg = validator.validate_yaml(config) + if not valid: + run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered) + return self._report(msg, run, run_id) + + if run_id: + run , run_id = self._prepare_run_object(run_id=run_id, triggered=triggered) + return 
self._trigger(repo_config=config, run=run, run_id=run_id) + + push = config["run_on"]["push"] + pull_request = config["run_on"]["pull_request"] + if push: + trigger_branches = push["branches"] + if branch and branch in trigger_branches: + run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered) + return self._trigger(repo_config=config, run=run, run_id=run_id) + if pull_request: + target_branches = pull_request["branches"] + if target_branch and target_branch in target_branches: + run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered) + return self._trigger(repo_config=config, run=run, run_id=run_id) + return - def enqueue(self, repo="", branch="", commit="", committer="", run_id=None, triggered=True): + def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_id=None, triggered=False): configs = InitialConfig() status = PENDING timestamp = datetime.now().timestamp() @@ -95,16 +126,22 @@ def enqueue(self, repo="", branch="", commit="", committer="", run_id=None, trig run_id = str(run.run_id) data["run_id"] = run_id redis.publish("zeroci_status", json.dumps(data)) - if run_id: - yaml_config = self._load_config(run_id, repo, commit) - if yaml_config: - valid = self.validate_yaml(yaml_config) - if valid: - pass - + if run and run_id: + return run, run_id + return None, None + + def _trigger(self, repo_config, run, run_id): + if run and run_id: + configs = InitialConfig() link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.run_id)}" vcs_obj = VCSFactory().get_cvn(repo=run.repo) - vcs_obj.status_send(status=status, link=link, commit=run.commit) - job = q.enqueue_call(func=Runner.build_and_test, args=(run_id,), result_ttl=5000, timeout=20000) + vcs_obj.status_send(status=PENDING, link=link, commit=run.commit) + job = q.enqueue_call(func=Runner.build_and_test, args=(run_id, repo_config), result_ttl=5000, timeout=20000) return job - return None + return + + def _report(self, msg, run, run_id): + redis.rpush(run_id, msg) + run.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) + run.save() + return diff --git a/backend/actions/validator.py b/backend/actions/validator.py index 4c9ea670..1c191fb3 100644 --- a/backend/actions/validator.py +++ b/backend/actions/validator.py @@ -1,7 +1,7 @@ import requests -from redis import Redis +# from redis import Redis -redis = Redis() +# redis = Redis() ERROR = "error" LOG_TYPE = "log" @@ -127,6 +127,43 @@ def _validate_job(self, job): msg = self._validate_prerequisites(prerequisites) return msg + def _validate_run_on(self, run_on): + msg = "" + if not run_on: + msg = "run_on should be in yaml and shouldn't be empty" + else: + if not isinstance(run_on, dict): + msg = "run_on should have push or pull_request as keys" + else: + push = run_on.get("push") + pull_request = run_on.get("pull_request") + if not any([push, pull_request]): + msg = "run_on should have push or pull_request as keys and at least one of them should be filled" + else: + if push: + if not isinstance(push, dict): + msg = "push should have branches as a key" + else: + branches = push.get("branches") + if branches and not isinstance(branches, list): + msg = "branches shouldn't be empty and it should be of contain list of the branches" + else: + for branch in branches: + if not isinstance(branch, str): + msg = "branches should be list of str" + if pull_request: + if not isinstance(pull_request, 
dict): + msg = "pull_request should have branches as a key" + else: + branches = pull_request.get("branches") + if branches and not isinstance(branches, list): + msg = "branches shouldn't be empty and it should be of contain list of the branches" + else: + for branch in branches: + if not isinstance(branch, str): + msg = "branches should be list of str" + return msg + # def _report(self, run_id, run_obj, msg): # msg = f"{msg} (see examples: https://github.com/threefoldtech/zeroCI/tree/development/docs/config)" # redis.rpush(run_id, msg) @@ -148,6 +185,10 @@ def validate_yaml(self, config): msg = self._validate_job(job) if msg: break + + run_on = config.get("run_on") + msg = self._validate_run_on(run_on) + if msg: - return msg - return True + return False, msg + return True, "" diff --git a/backend/apis/trigger.py b/backend/apis/trigger.py index c3ff2ae5..3f3f6306 100644 --- a/backend/apis/trigger.py +++ b/backend/apis/trigger.py @@ -17,6 +17,7 @@ def git_trigger(): """Trigger the test when a post request is sent from a repo's webhook. """ + # TODO: handle the case of running from push and pull request to not run it twice. configs = InitialConfig() if request.headers.get("Content-Type") == "application/json": job = "" @@ -32,7 +33,7 @@ def git_trigger(): committer = request.json["pusher"]["login"] branch_exist = not commit.startswith("000000") if branch_exist: - job = trigger.enqueue(repo=repo, branch=branch, commit=commit, committer=committer, triggered=False) + job = trigger.enqueue(repo=repo, branch=branch, commit=commit, committer=committer) # pull case # TODO: Handle the request for gitea. @@ -43,7 +44,7 @@ def git_trigger(): target_branch = request.json["pull_request"]["base"]["ref"] commit = request.json["pull_request"]["head"]["sha"] committer = request.json["sender"]["login"] - job = trigger.enqueue(repo=repo, branch=current_branch, commit=commit, committer=committer, triggered=False) + job = trigger.enqueue(repo=repo, branch=current_branch, commit=commit, committer=committer, target_branch=target_branch) if job: return HTTPResponse(job.get_id(), 201) return HTTPResponse("Nothing to be done", 200) @@ -65,7 +66,7 @@ def run_trigger(): return HTTPResponse( f"There is a running job for this run_id {run_id}, please try again after this run finishes", 503 ) - job = trigger.enqueue(run_id=run_id) + job = trigger.enqueue(run_id=run_id, triggered=True) return HTTPResponse(job.get_id(), 200) repo = request.json.get("repo") @@ -80,7 +81,7 @@ def run_trigger(): f"There is a running job from this commit {last_commit}, please try again after this run finishes", 503 ) if last_commit: - job = trigger.enqueue(repo=repo, branch=branch, commit=last_commit, committer=committer) + job = trigger.enqueue(repo=repo, branch=branch, commit=last_commit, committer=committer, triggered=True) else: return HTTPResponse(f"Couldn't get last commit from this branch {branch}, please try again", 503) if job: diff --git a/docs/config/nosetests.yaml b/docs/config/nosetests.yaml index ff0a1e0d..985aca08 100644 --- a/docs/config/nosetests.yaml +++ b/docs/config/nosetests.yaml @@ -1,4 +1,12 @@ # For a repository named AhmedHanafy725/test_zeroci +run_on: + push: + branches: + - development + pull_request: + branches: + - development + jobs: - name: Generate Bin prerequisites: From bbf35dcfe9e5bdd957942a441f05f924fc6c587f Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Tue, 22 Dec 2020 12:11:40 +0200 Subject: [PATCH 05/13] Disable schedules apis Fix validation for run_on Fix trigger api Fix results api --- 
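
For reference, the run_on block these fixes validate, written as the dict that yaml.safe_load hands to Validator._validate_run_on; a sketch mirroring docs/config/nosetests.yaml:

    run_on = {
        "push": {"branches": ["development"]},
        "pull_request": {"branches": ["development"]},
    }
    msg = Validator()._validate_run_on(run_on)  # an empty msg means the block is valid
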
backend/actions/trigger.py | 17 ++++++++++++----- backend/actions/validator.py | 32 ++++++++++++++++---------------- backend/apis/results.py | 2 +- backend/apis/trigger.py | 7 +++++-- backend/zeroci.py | 2 +- 5 files changed, 35 insertions(+), 25 deletions(-) diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py index 9b65986e..0461f428 100644 --- a/backend/actions/trigger.py +++ b/backend/actions/trigger.py @@ -10,14 +10,19 @@ from packages.vcs.vcs import VCSFactory from redis import Redis from rq import Queue -from models.run import Run -from actions.validator import Validator +from actions.reporter import Reporter from actions.runner import Runner +from actions.validator import Validator + +reporter = Reporter() +runner = Runner() redis = Redis() q = Queue(connection=redis) PENDING = "pending" +ERROR = "error" + ERROR = "error" LOG_TYPE = "log" @@ -58,8 +63,8 @@ def enqueue(self, repo="", branch="", target_branch="", commit="", committer="", run , run_id = self._prepare_run_object(run_id=run_id, triggered=triggered) return self._trigger(repo_config=config, run=run, run_id=run_id) - push = config["run_on"]["push"] - pull_request = config["run_on"]["pull_request"] + push = config["run_on"].get("push") + pull_request = config["run_on"].get("pull_request") if push: trigger_branches = push["branches"] if branch and branch in trigger_branches: @@ -136,12 +141,14 @@ def _trigger(self, repo_config, run, run_id): link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.run_id)}" vcs_obj = VCSFactory().get_cvn(repo=run.repo) vcs_obj.status_send(status=PENDING, link=link, commit=run.commit) - job = q.enqueue_call(func=Runner.build_and_test, args=(run_id, repo_config), result_ttl=5000, timeout=20000) + job = q.enqueue_call(func=runner.build_and_test, args=(run_id, repo_config), result_ttl=5000, timeout=20000) return job return def _report(self, msg, run, run_id): redis.rpush(run_id, msg) run.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) + run.status = ERROR run.save() + reporter.report(run_id=run_id, run_obj=run) return diff --git a/backend/actions/validator.py b/backend/actions/validator.py index 1c191fb3..9d2f4651 100644 --- a/backend/actions/validator.py +++ b/backend/actions/validator.py @@ -145,31 +145,31 @@ def _validate_run_on(self, run_on): msg = "push should have branches as a key" else: branches = push.get("branches") - if branches and not isinstance(branches, list): - msg = "branches shouldn't be empty and it should be of contain list of the branches" + if not branches: + msg = "branches on push shouldn't be empty" else: - for branch in branches: - if not isinstance(branch, str): - msg = "branches should be list of str" + if not isinstance(branches, list): + msg = "branches shouldn't be empty and it should be of contain list of the branches" + else: + for branch in branches: + if not isinstance(branch, str): + msg = "branches should be list of str" if pull_request: if not isinstance(pull_request, dict): msg = "pull_request should have branches as a key" else: branches = pull_request.get("branches") - if branches and not isinstance(branches, list): - msg = "branches shouldn't be empty and it should be of contain list of the branches" + if not branches: + msg = "branches on pull_request shouldn't be empty" else: - for branch in branches: - if not isinstance(branch, str): - msg = "branches should be list of str" + if not isinstance(branches, list): + msg = "branches shouldn't be empty and it should be of contain list of the 
branches" + else: + for branch in branches: + if not isinstance(branch, str): + msg = "branches should be list of str" return msg - # def _report(self, run_id, run_obj, msg): - # msg = f"{msg} (see examples: https://github.com/threefoldtech/zeroCI/tree/development/docs/config)" - # redis.rpush(run_id, msg) - # run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) - # run_obj.save() - def validate_yaml(self, config): jobs = config.get("jobs") if not jobs: diff --git a/backend/apis/results.py b/backend/apis/results.py index 41c927af..30e06bf0 100644 --- a/backend/apis/results.py +++ b/backend/apis/results.py @@ -36,7 +36,7 @@ def result(repo): run_id = request.query.get("id") if run_id: - run = Run.get(run_id=id) + run = Run.get(run_id=run_id) live = True if run.status == PENDING else False return json.dumps({"live": live, "result": run.result}) diff --git a/backend/apis/trigger.py b/backend/apis/trigger.py index 3f3f6306..4ceee9e1 100644 --- a/backend/apis/trigger.py +++ b/backend/apis/trigger.py @@ -67,7 +67,10 @@ def run_trigger(): f"There is a running job for this run_id {run_id}, please try again after this run finishes", 503 ) job = trigger.enqueue(run_id=run_id, triggered=True) - return HTTPResponse(job.get_id(), 200) + if job: + return HTTPResponse(job.get_id(), 200) + else: + return HTTPResponse("", 200) repo = request.json.get("repo") branch = request.json.get("branch") @@ -86,4 +89,4 @@ def run_trigger(): return HTTPResponse(f"Couldn't get last commit from this branch {branch}, please try again", 503) if job: return HTTPResponse(job.get_id(), 200) - return HTTPResponse("Wrong data", 400) + return HTTPResponse("", 200) diff --git a/backend/zeroci.py b/backend/zeroci.py index 9bf96905..4d10051a 100644 --- a/backend/zeroci.py +++ b/backend/zeroci.py @@ -8,7 +8,7 @@ import apis.config import apis.login import apis.results -import apis.schedule +# import apis.schedule import apis.trigger import apis.websockets import apis.default From 2dbf7bb6111682ce5b672e6c8b1137f1d64aece4 Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Tue, 22 Dec 2020 13:34:01 +0200 Subject: [PATCH 06/13] Use zeroci workers to handle trigger apis --- backend/actions/trigger.py | 5 +++-- backend/apis/trigger.py | 26 ++++++++++++++------------ backend/health/health_check.py | 11 +++++++++++ backend/health/health_recover.py | 4 ++++ backend/zeroci_worker.py | 11 +++++++++++ install/startup.sh | 1 + 6 files changed, 44 insertions(+), 14 deletions(-) create mode 100644 backend/zeroci_worker.py diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py index 0461f428..c9d5910d 100644 --- a/backend/actions/trigger.py +++ b/backend/actions/trigger.py @@ -19,7 +19,7 @@ runner = Runner() redis = Redis() -q = Queue(connection=redis) +q = Queue(connection=redis, name="default") PENDING = "pending" ERROR = "error" @@ -44,7 +44,7 @@ def _load_config(self, repo, commit): return False, "", msg - def enqueue(self, repo="", branch="", target_branch="", commit="", committer="", run_id=None, triggered=False): + def enqueue(self, repo="", branch="", commit="", committer="", target_branch="", run_id=None, triggered=False): if run_id: run = Run.get(run_id=run_id) repo = run.repo @@ -141,6 +141,7 @@ def _trigger(self, repo_config, run, run_id): link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.run_id)}" vcs_obj = VCSFactory().get_cvn(repo=run.repo) vcs_obj.status_send(status=PENDING, link=link, commit=run.commit) + #TODO: before triggering, check that there is not a 
run with same commit and in state pending. job = q.enqueue_call(func=runner.build_and_test, args=(run_id, repo_config), result_ttl=5000, timeout=20000) return job return diff --git a/backend/apis/trigger.py b/backend/apis/trigger.py index 4ceee9e1..ec0f0ab6 100644 --- a/backend/apis/trigger.py +++ b/backend/apis/trigger.py @@ -1,23 +1,27 @@ import json -from apis.base import app, check_configs, user +from actions.trigger import Trigger from bottle import HTTPResponse, redirect, request from models.initial_config import InitialConfig from models.run import Run from packages.vcs.vcs import VCSFactory -from actions.trigger import Trigger +from redis import Redis +from rq import Queue + +from apis.base import app, check_configs, user trigger = Trigger() PENDING = "pending" - +redis = Redis() +q = Queue(connection=redis, name="zeroci") @app.route("/git_trigger", method=["POST"]) @check_configs def git_trigger(): """Trigger the test when a post request is sent from a repo's webhook. """ - # TODO: handle the case of running from push and pull request to not run it twice. + #TODO: make payload validation before work on it. configs = InitialConfig() if request.headers.get("Content-Type") == "application/json": job = "" @@ -33,18 +37,18 @@ def git_trigger(): committer = request.json["pusher"]["login"] branch_exist = not commit.startswith("000000") if branch_exist: - job = trigger.enqueue(repo=repo, branch=branch, commit=commit, committer=committer) + job = q.enqueue_call(trigger.enqueue, args=(repo, branch, commit, committer, "", None, False), result_ttl=5000, timeout=20000) # pull case # TODO: Handle the request for gitea. elif request.json.get("pull_request"): if request.json.get("action") in ["opened", "synchronize"]: repo = request.json["pull_request"]["head"]["repo"]["full_name"] - current_branch = request.json["pull_request"]["head"]["ref"] + branch = request.json["pull_request"]["head"]["ref"] target_branch = request.json["pull_request"]["base"]["ref"] commit = request.json["pull_request"]["head"]["sha"] committer = request.json["sender"]["login"] - job = trigger.enqueue(repo=repo, branch=current_branch, commit=commit, committer=committer, target_branch=target_branch) + job = q.enqueue_call(trigger.enqueue, args=(repo, branch, commit, committer, target_branch, None, False), result_ttl=5000, timeout=20000) if job: return HTTPResponse(job.get_id(), 201) return HTTPResponse("Nothing to be done", 200) @@ -66,11 +70,9 @@ def run_trigger(): return HTTPResponse( f"There is a running job for this run_id {run_id}, please try again after this run finishes", 503 ) - job = trigger.enqueue(run_id=run_id, triggered=True) + job = q.enqueue_call(trigger.enqueue, args=("", "", "", "", "", run_id, True), result_ttl=5000, timeout=20000) if job: return HTTPResponse(job.get_id(), 200) - else: - return HTTPResponse("", 200) repo = request.json.get("repo") branch = request.json.get("branch") @@ -84,9 +86,9 @@ def run_trigger(): f"There is a running job from this commit {last_commit}, please try again after this run finishes", 503 ) if last_commit: - job = trigger.enqueue(repo=repo, branch=branch, commit=last_commit, committer=committer, triggered=True) + job = q.enqueue_call(trigger.enqueue, args=(repo, branch, last_commit, committer, "", None, True), result_ttl=5000, timeout=20000) else: return HTTPResponse(f"Couldn't get last commit from this branch {branch}, please try again", 503) if job: return HTTPResponse(job.get_id(), 200) - return HTTPResponse("", 200) + return HTTPResponse("Wrong data", 400) diff --git 
a/backend/health/health_check.py b/backend/health/health_check.py index 556f0832..ac4fa549 100644 --- a/backend/health/health_check.py +++ b/backend/health/health_check.py @@ -49,6 +49,17 @@ def test_workers(self): if not pid: recover.worker(i) + def test_zeroci_workers(self): + """Check rq workers are up. + """ + pids = self.get_process_pid("python3 zeroci_worker") + zeroci_workers = len(pids) + if zeroci_workers < 2: + for i in range(1, 6): + pid = self.get_process_pid(f"python3 zeroci_worker{i}") + if not pid: + recover.zeroci_worker(i) + def test_schedule(self): """Check rq schedule is up. """ diff --git a/backend/health/health_recover.py b/backend/health/health_recover.py index a69ed2f1..65495b61 100644 --- a/backend/health/health_recover.py +++ b/backend/health/health_recover.py @@ -16,6 +16,10 @@ def redis(self): def worker(self, wid): cmd = f"/bin/bash -c 'cd {PATH}; python3 worker{wid}.py &>> worker_{wid}.log &'" self.execute_cmd(cmd=cmd, timeout=TIMEOUT) + + def zeroci_worker(self, wid): + cmd = f"/bin/bash -c 'cd {PATH}; python3 zeroci_worker{wid}.py &>> zeroci_worker_{wid}.log &'" + self.execute_cmd(cmd=cmd, timeout=TIMEOUT) def scheduler(self): cmd = f"/bin/bash -c 'cd {PATH}; rqscheduler &>> schedule.log &'" diff --git a/backend/zeroci_worker.py b/backend/zeroci_worker.py new file mode 100644 index 00000000..2c1d17de --- /dev/null +++ b/backend/zeroci_worker.py @@ -0,0 +1,11 @@ +import os + +from redis import Redis +from rq import Worker, Queue, Connection + +listen = ["zeroci"] + +if __name__ == "__main__": + with Connection(Redis()): + worker = Worker(list(map(Queue, listen))) + worker.work() diff --git a/install/startup.sh b/install/startup.sh index ad1c4fc1..11b66c3e 100644 --- a/install/startup.sh +++ b/install/startup.sh @@ -14,6 +14,7 @@ cp jsng_config.toml /root/.config/jumpscale/config.toml cd /sandbox/code/github/threefoldtech/zeroCI/backend redis-server /etc/redis/redis.conf for i in {1..5}; do cp worker.py worker$i.py; python3 worker$i.py &> worker_$i.log & done +for i in {1..2}; do cp zeroci_worker.py zeroci_worker$i.py; python3 zeroci_worker$i.py &> zeroci_worker_$i.log & done rqscheduler &> schedule.log & service cron start python3 zeroci.py From 0796957e1e1b0d33e1242c895f92db8e6ff8fcd1 Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Tue, 22 Dec 2020 16:56:43 +0200 Subject: [PATCH 07/13] Add scheduler --- backend/actions/trigger.py | 48 +++++++++++++ backend/actions/validator.py | 59 +++++++++++++--- backend/apis/schedule.py | 115 -------------------------------- backend/models/schedule_info.py | 15 ----- 4 files changed, 96 insertions(+), 141 deletions(-) delete mode 100644 backend/apis/schedule.py delete mode 100644 backend/models/schedule_info.py diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py index c9d5910d..34ecb2e8 100644 --- a/backend/actions/trigger.py +++ b/backend/actions/trigger.py @@ -7,6 +7,7 @@ from bottle import request from models.initial_config import InitialConfig from models.run import Run +from models.schedule_info import ScheduleInfo from packages.vcs.vcs import VCSFactory from redis import Redis from rq import Queue @@ -14,9 +15,11 @@ from actions.reporter import Reporter from actions.runner import Runner from actions.validator import Validator +from rq_scheduler import Scheduler reporter = Reporter() runner = Runner() +scheduler = Scheduler(connection=Redis()) redis = Redis() q = Queue(connection=redis, name="default") @@ -65,6 +68,12 @@ def enqueue(self, repo="", branch="", commit="", committer="", 
target_branch="", push = config["run_on"].get("push") pull_request = config["run_on"].get("pull_request") + manual = config["run_on"].get("manual") + schedule = config["run_on"].get("schedule") + + if repo and branch and not schedule: + schedule_name = f"{repo}_{branch}" + scheduler.cancel(schedule_name) if push: trigger_branches = push["branches"] if branch and branch in trigger_branches: @@ -75,6 +84,23 @@ def enqueue(self, repo="", branch="", commit="", committer="", target_branch="", if target_branch and target_branch in target_branches: run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered) return self._trigger(repo_config=config, run=run, run_id=run_id) + if manual and triggered: + trigger_branches = manual["branches"] + if branch and branch in trigger_branches: + run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered) + return self._trigger(repo_config=config, run=run, run_id=run_id) + if schedule: + schedule_branch = schedule["branch"] + cron = schedule["cron"] + schedule_name = f"{repo}_{branch}" + if branch == schedule_branch: + scheduler.cron( + cron_string=cron, + func=self._trigger_schedule, + args=[repo, branch], + id=schedule_name, + timeout=-1, + ) return def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_id=None, triggered=False): @@ -146,7 +172,29 @@ def _trigger(self, repo_config, run, run_id): return job return + def _trigger_schedule(self, repo, branch): + vcs_obj = VCSFactory().get_cvn(repo=repo) + last_commit = vcs_obj.get_last_commit(branch=branch) + committer = vcs_obj.get_committer(commit=last_commit) + where = {"repo": repo, "branch": branch, "commit": last_commit, "status": PENDING} + exist_run = Run.get_objects(fields=["status"], **where) + run, run_id = self._prepare_run_object(repo=repo, branch=branch, commit=last_commit, committer=committer) + if exist_run: + msg = f"There is a running job from this commit {last_commit}" + return self._report(msg, run, run_id) + run.triggered_by = "ZeroCI Scheduler" + run.save() + status, config, msg = self._load_config(repo, last_commit) + if not status: + return self._report(msg, run, run_id) + validator = Validator() + valid, msg = validator.validate_yaml(config) + if not valid: + return self._report(msg, run, run_id) + runner.build_and_test(run_id, config) + def _report(self, msg, run, run_id): + msg = f"{msg} (see examples: https://github.com/threefoldtech/zeroCI/tree/development/docs/config)" redis.rpush(run_id, msg) run.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg}) run.status = ERROR diff --git a/backend/actions/validator.py b/backend/actions/validator.py index 9d2f4651..9648d4c9 100644 --- a/backend/actions/validator.py +++ b/backend/actions/validator.py @@ -1,11 +1,6 @@ import requests -# from redis import Redis - -# redis = Redis() -ERROR = "error" -LOG_TYPE = "log" - - +from croniter import croniter +import traceback class Validator: def _validate_test_script(self, test_script): msg = "" @@ -137,8 +132,10 @@ def _validate_run_on(self, run_on): else: push = run_on.get("push") pull_request = run_on.get("pull_request") - if not any([push, pull_request]): - msg = "run_on should have push or pull_request as keys and at least one of them should be filled" + schedule = run_on.get("schedule") + manual = run_on.get("manual") + if not any([push, pull_request, schedule, manual]): + msg = "run_on should have push, 
pull_request, schedule or manual as keys and at least one of them should be filled"
             else:
                 if push:
                     if not isinstance(push, dict):
@@ -149,7 +146,7 @@ def _validate_run_on(self, run_on):
                             msg = "branches on push shouldn't be empty"
                         else:
                             if not isinstance(branches, list):
-                                msg = "branches shouldn't be empty and it should be of contain list of the branches"
+                                msg = "branches should contain a list of the branches"
                             else:
                                 for branch in branches:
                                     if not isinstance(branch, str):
                                         msg = "branches should be list of str"
@@ -163,11 +160,51 @@ def _validate_run_on(self, run_on):
                             msg = "branches on pull_request shouldn't be empty"
                         else:
                             if not isinstance(branches, list):
-                                msg = "branches shouldn't be empty and it should be of contain list of the branches"
+                                msg = "branches should contain a list of the branches"
                             else:
                                 for branch in branches:
                                     if not isinstance(branch, str):
                                         msg = "branches should be list of str"
+
+            if manual:
+                if not isinstance(manual, dict):
+                    msg = "manual should have branches as a key"
+                else:
+                    branches = manual.get("branches")
+                    if not branches:
+                        msg = "branches on manual shouldn't be empty"
+                    else:
+                        if not isinstance(branches, list):
+                            msg = "branches should contain a list of the branches"
+                        else:
+                            for branch in branches:
+                                if not isinstance(branch, str):
+                                    msg = "branches should be list of str"
+
+            if schedule:
+                if not isinstance(schedule, dict):
+                    msg = "schedule should have branch and cron as keys"
+                else:
+                    branch = schedule.get("branch")
+                    cron = schedule.get("cron")
+                    if not branch:
+                        msg = "branch on schedule shouldn't be empty"
+                    else:
+                        if not isinstance(branch, str):
+                            msg = "branch should be str"
+                    if not cron:
+                        msg = "cron on schedule shouldn't be empty"
+                    else:
+                        if not isinstance(cron, str):
+                            msg = "cron should be str"
+                        else:
+                            try:
+                                croniter(cron)
+                            except Exception:
+                                msg = traceback.format_exc()
+
+
         return msg
 
     def validate_yaml(self, config):
diff --git a/backend/apis/schedule.py b/backend/apis/schedule.py
deleted file mode 100644
index ce5c1f21..00000000
--- a/backend/apis/schedule.py
+++ /dev/null
@@ -1,115 +0,0 @@
-import json
-
-from redis import Redis
-from rq import Queue
-from rq_scheduler import Scheduler
-
-from actions.actions import Actions
-from apis.base import app, check_configs, user
-from bottle import HTTPResponse, abort, redirect, request
-from models.schedule_info import ScheduleInfo
-from models.scheduler_run import SchedulerRun
-
-actions = Actions()
-q = Queue(connection=Redis())
-scheduler = Scheduler(connection=Redis())
-PENDING = "pending"
-
-
-@app.route("/api/schedule", method=["GET", "POST", "DELETE"])
-@user
-@check_configs
-def schedule():
-    if request.method == "GET":
-        schedule_name = request.query.get("schedule_name")
-        if schedule_name:
-            schedule_info = ScheduleInfo.get_by_name(name=schedule_name)
-            info = {
-                "schedule_name": schedule_name,
-                "install": schedule_info.install,
-                "script": schedule_info.script,
-                "prerequisites": schedule_info.prerequisites,
-                "run_time": schedule_info.run_time,
-                "created_by": schedule_info.created_by,
-            }
-            return json.dumps(info)
-
-        schedules_names = ScheduleInfo.list_all()
-        return json.dumps(schedules_names)
-
-    if request.headers.get("Content-Type") == "application/json":
-        if request.method == "POST":
-            data = ["schedule_name", "run_time", "prerequisites", "install", "script", "bin_path"]
-            job = {}
-            for item in data:
-                value = request.json.get(item)
-                if not value:
-                    if item == "bin_path":
-                        continue
-                    return HTTPResponse(f"{item} should have a value", 400)
-                elif item is "script" and not 
isinstance(value, list): - return HTTPResponse(f"{item} should be str or list", 400) - else: - job[item] = value - - created_by = request.environ.get("beaker.session").get("username").strip(".3bot") - job["created_by"] = created_by - - if job["schedule_name"] in ScheduleInfo.list_all(): - return HTTPResponse(f"Schedule name {job['schedule_name']} is already used", 400) - - schedule_info = ScheduleInfo(**job) - schedule_info.save() - try: - scheduler.cron( - cron_string=job["run_time"], - func=actions.schedule_run, - args=[job,], - run_id=job["schedule_name"], - timeout=-1, - ) - except: - return HTTPResponse("Wrong time format should be like (0 * * * *)", 400) - return HTTPResponse("Added", 201) - else: - schedule_name = request.json.get("schedule_name") - schedule_info = ScheduleInfo.get_by_name(name=schedule_name) - schedule_info.delete() - scheduler.cancel(schedule_name) - return HTTPResponse("Removed", 200) - return abort(400) - - -@app.route("/api/schedule_trigger", method=["POST", "GET"]) -@user -@check_configs -def schedule_trigger(): - if request.method == "GET": - redirect("/") - - if request.headers.get("Content-Type") == "application/json": - schedule_name = request.json.get("schedule_name") - - where = {"schedule_name": schedule_name} - runs = SchedulerRun.get_objects(fields=["status"], order_by="timestamp", asc=False, **where) - if runs and runs[0]["status"] == PENDING: - return HTTPResponse( - f"There is a running job from this schedule {schedule_name}, please try again after this run finishes", - 503, - ) - if schedule_name not in ScheduleInfo.list_all(): - return HTTPResponse(f"Schedule name {schedule_name} is not found", 400) - - schedule_info = ScheduleInfo.get_by_name(name=schedule_name) - job = { - "schedule_name": schedule_name, - "prerequisites": schedule_info.prerequisites, - "install": schedule_info.install, - "script": schedule_info.script, - "triggered_by": request.environ.get("beaker.session").get("username").strip(".3bot"), - "bin_path": schedule_info.bin_path, - } - job = q.enqueue_call(func=actions.schedule_run, args=(job,), result_ttl=5000, timeout=20000,) - if job: - return HTTPResponse(job.get_id(), 200) - return HTTPResponse("Wrong data", 400) diff --git a/backend/models/schedule_info.py b/backend/models/schedule_info.py deleted file mode 100644 index 09d4a3a9..00000000 --- a/backend/models/schedule_info.py +++ /dev/null @@ -1,15 +0,0 @@ -from .base import fields, ModelFactory, Document, StoredFactory - - -class ScheduleInfoModel(Document): - run_time = fields.String(required=True) - branch = fields.String(required=True) - created_by = fields.String(required=True) - - -class ScheduleInfo(ModelFactory): - _model = StoredFactory(ScheduleInfoModel) - - def __new__(cls, **kwargs): - name = kwargs["schedule_name"] - return cls._model.new(name=name, **kwargs) From 2cf60f81587c655db450e22ab63f01219fc297cd Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Tue, 22 Dec 2020 16:57:35 +0200 Subject: [PATCH 08/13] Use uuid instead of timestamp for run id --- backend/models/base.py | 11 +++-------- backend/models/run.py | 4 ++-- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/backend/models/base.py b/backend/models/base.py index d1e70eb7..6effaca5 100644 --- a/backend/models/base.py +++ b/backend/models/base.py @@ -4,7 +4,7 @@ class Document(Base): @property def run_id(self): - return self.instance_name.strip("model") + return self.instance_name @property def name(self): @@ -16,12 +16,7 @@ class ModelFactory: @classmethod def get(cls, run_id): - name = 
"model" + str(run_id) - return cls._model.find(name) - - @classmethod - def get_by_name(cls, name): - return cls._model.find(name) + return cls._model.find(run_id) @classmethod def list_all(cls): @@ -67,7 +62,7 @@ def get_objects(cls, fields, order_by=None, asc=True, **kwargs): obj_dict = {} for field in fields: obj_dict[field] = getattr(obj, field) - obj_dict["run_id"] = obj.instance_name.strip("model") + obj_dict["run_id"] = obj.instance_name results.append(obj_dict) if order_by: diff --git a/backend/models/run.py b/backend/models/run.py index 2c205598..1029bbcc 100644 --- a/backend/models/run.py +++ b/backend/models/run.py @@ -1,5 +1,5 @@ from .base import Document, ModelFactory, fields, StoredFactory - +import uuid class RunModel(Document): timestamp = fields.Integer(required=True, indexed=True) @@ -17,6 +17,6 @@ class Run(ModelFactory): _model = StoredFactory(RunModel) def __new__(cls, **kwargs): - name = "model" + str(int(kwargs["timestamp"] * 10 ** 6)) + name = uuid.uuid4().hex kwargs["timestamp"] = int(kwargs["timestamp"]) return cls._model.new(name=name, **kwargs) From fa61c4f81d02727a676013ca4cb6e00f88704b87 Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Tue, 22 Dec 2020 17:06:19 +0200 Subject: [PATCH 09/13] Formatting --- backend/actions/runner.py | 25 ++++++++++--------------- backend/actions/trigger.py | 30 ++++++++++++++++++++---------- backend/actions/validator.py | 15 ++++++++------- backend/apis/config.py | 14 ++++++-------- backend/apis/results.py | 8 +++----- backend/apis/trigger.py | 31 ++++++++++++++++++++++++------- backend/deployment/container.py | 9 +++++---- backend/health/health_check.py | 15 +++++---------- backend/health/health_recover.py | 2 +- backend/models/run.py | 4 +++- backend/packages/vcs/vcs.py | 10 +++++++--- backend/utils/utils.py | 2 +- backend/zeroci.py | 1 - 13 files changed, 93 insertions(+), 73 deletions(-) diff --git a/backend/actions/runner.py b/backend/actions/runner.py index 1655c48d..87c8a8a2 100644 --- a/backend/actions/runner.py +++ b/backend/actions/runner.py @@ -7,17 +7,17 @@ import redis import requests import yaml - -from actions.validator import Validator from deployment.container import Container from kubernetes.client import V1EnvVar from models.initial_config import InitialConfig -from models.run_config import RunConfig from models.run import Run +from models.run_config import RunConfig from packages.vcs.vcs import VCSFactory -from actions.reporter import Reporter from utils.utils import Utils +from actions.reporter import Reporter +from actions.validator import Validator + container = Container() reporter = Reporter() utils = Utils() @@ -39,8 +39,7 @@ class Runner: run_obj = None def test_run(self, job): - """Runs tests and store the result in DB. - """ + """Runs tests and store the result in DB.""" for line in job["script"]: if line.get("type") == "neph": finished = self.neph_run(job_name=job["name"], line=line) @@ -106,8 +105,7 @@ def neph_run(self, job_name, line): return True def build(self, job, clone_details, job_number): - """Create VM with the required prerequisties and run installation steps to get it ready for running tests. 
- """ + """Create VM with the required prerequisties and run installation steps to get it ready for running tests.""" env = self._get_run_env() deployed = container.deploy(env=env, prerequisites=job["prerequisites"], repo_path=clone_details["remote_path"]) installed = False @@ -138,8 +136,7 @@ def build(self, job, clone_details, job_number): return deployed, installed def cal_status(self): - """Calculate the status of the whole tests result has been stored on the BD's id. - """ + """Calculate the status of the whole tests result has been stored on the BD's id.""" status = SUCCESS for result in self.run_obj.result: if result["status"] != SUCCESS: @@ -148,8 +145,7 @@ def cal_status(self): self.run_obj.save() def _get_run_env(self): - """Get run environment variables. - """ + """Get run environment variables.""" name = self.run_obj.repo run_config = RunConfig(name=name) run_env = run_config.env @@ -160,8 +156,7 @@ def _get_run_env(self): return env def repo_clone_details(self): - """Clone repo. - """ + """Clone repo.""" configs = InitialConfig() repo_remote_path = os.path.join(self._REPOS_DIR, self.run_obj.repo) clone_url = urljoin(configs.vcs_host, f"{self.run_obj.repo}.git") @@ -210,7 +205,7 @@ def _set_bin(self): def build_and_test(self, run_id, repo_config): """Builds, runs tests, calculates status and gives report on telegram and your version control system. - + :param id: DB's id of this run details. :type id: str :param schedule_name: it will have a value if the run is scheduled. diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py index 34ecb2e8..6d513051 100644 --- a/backend/actions/trigger.py +++ b/backend/actions/trigger.py @@ -11,11 +11,11 @@ from packages.vcs.vcs import VCSFactory from redis import Redis from rq import Queue +from rq_scheduler import Scheduler from actions.reporter import Reporter from actions.runner import Runner from actions.validator import Validator -from rq_scheduler import Scheduler reporter = Reporter() runner = Runner() @@ -44,7 +44,7 @@ def _load_config(self, repo, commit): msg = traceback.format_exc() else: msg = "zeroCI.yaml is not found on the repository's home" - + return False, "", msg def enqueue(self, repo="", branch="", commit="", committer="", target_branch="", run_id=None, triggered=False): @@ -54,16 +54,20 @@ def enqueue(self, repo="", branch="", commit="", committer="", target_branch="", commit = run.commit status, config, msg = self._load_config(repo, commit) if not status: - run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered) + run, run_id = self._prepare_run_object( + repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered + ) return self._report(msg, run, run_id) validator = Validator() valid, msg = validator.validate_yaml(config) if not valid: - run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered) + run, run_id = self._prepare_run_object( + repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered + ) return self._report(msg, run, run_id) - + if run_id: - run , run_id = self._prepare_run_object(run_id=run_id, triggered=triggered) + run, run_id = self._prepare_run_object(run_id=run_id, triggered=triggered) return self._trigger(repo_config=config, run=run, run_id=run_id) push = config["run_on"].get("push") @@ -77,17 +81,23 @@ def enqueue(self, repo="", branch="", commit="", 
committer="", target_branch="", if push: trigger_branches = push["branches"] if branch and branch in trigger_branches: - run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered) + run, run_id = self._prepare_run_object( + repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered + ) return self._trigger(repo_config=config, run=run, run_id=run_id) if pull_request: target_branches = pull_request["branches"] if target_branch and target_branch in target_branches: - run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered) + run, run_id = self._prepare_run_object( + repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered + ) return self._trigger(repo_config=config, run=run, run_id=run_id) if manual and triggered: trigger_branches = manual["branches"] if branch and branch in trigger_branches: - run , run_id = self._prepare_run_object(repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered) + run, run_id = self._prepare_run_object( + repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered + ) return self._trigger(repo_config=config, run=run, run_id=run_id) if schedule: schedule_branch = schedule["branch"] @@ -167,7 +177,7 @@ def _trigger(self, repo_config, run, run_id): link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.run_id)}" vcs_obj = VCSFactory().get_cvn(repo=run.repo) vcs_obj.status_send(status=PENDING, link=link, commit=run.commit) - #TODO: before triggering, check that there is not a run with same commit and in state pending. + # TODO: before triggering, check that there is not a run with same commit and in state pending. 
 job = q.enqueue_call(func=runner.build_and_test, args=(run_id, repo_config), result_ttl=5000, timeout=20000)
         return job
     return
 
diff --git a/backend/actions/validator.py
index 9648d4c9..86c5036d 100644
--- a/backend/actions/validator.py
+++ b/backend/actions/validator.py
@@ -1,6 +1,9 @@
+import traceback
+
 import requests
 from croniter import croniter
-import traceback
+
+
 class Validator:
     def _validate_test_script(self, test_script):
         msg = ""
@@ -151,7 +154,7 @@ def _validate_run_on(self, run_on):
                             for branch in branches:
                                 if not isinstance(branch, str):
                                     msg = "branches should be list of str"
-            if pull_request: 
+            if pull_request:
                 if not isinstance(pull_request, dict):
                     msg = "pull_request should have branches as a key"
                 else:
                     branches = pull_request.get("branches")
@@ -166,7 +169,7 @@ def _validate_run_on(self, run_on):
                             for branch in branches:
                                 if not isinstance(branch, str):
                                     msg = "branches should be list of str"
-            if manual: 
+            if manual:
                 if not isinstance(manual, dict):
                     msg = "manual should have branches as a key"
                 else:
                     branches = manual.get("branches")
@@ -180,8 +183,8 @@ def _validate_run_on(self, run_on):
                             for branch in branches:
                                 if not isinstance(branch, str):
                                     msg = "branches should be list of str"
-
-            if schedule: 
+
+            if schedule:
                 if not isinstance(schedule, dict):
                     msg = "schedule should have branch and cron as keys"
                 else:
@@ -202,9 +205,7 @@ def _validate_run_on(self, run_on):
                                 croniter(cron)
                             except Exception:
                                 msg = traceback.format_exc()
-
-
         return msg
 
     def validate_yaml(self, config):
diff --git a/backend/apis/config.py
index c9781ff0..45fdcfda 100644
--- a/backend/apis/config.py
+++ b/backend/apis/config.py
@@ -2,21 +2,20 @@
 import sys
 
 import requests
-from telegram import Bot
-from telegram.error import BadRequest, InvalidToken, Unauthorized
-
-from apis.base import admin, app, check_configs, user
 from bottle import HTTPResponse, abort, request
 from models.initial_config import InitialConfig
 from models.run_config import RunConfig
 from packages.vcs.vcs import VCSFactory
+from telegram import Bot
+from telegram.error import BadRequest, InvalidToken, Unauthorized
+
+from apis.base import admin, app, check_configs, user
 
 
 @app.route("/api/telegram_config", method=["GET", "POST"])
 @admin
 def validate_telegam():
-    """Validate telegram token and chat ID
-    """
+    """Validate telegram token and chat ID"""
     configs = InitialConfig()
     confs = ["chat_id", "bot_token"]
     conf_dict = {}
@@ -51,8 +50,7 @@ def validate_telegam():
 @app.route("/api/vcs_config", method=["GET", "POST"])
 @admin
 def vcs_config():
-    """Initial configuration for the ci before start working.
-    """
+    """Initial configuration for the CI before it starts working."""
     configs = InitialConfig()
     confs = ["domain", "vcs_host", "vcs_token"]
     conf_dict = {}
diff --git a/backend/apis/results.py
index 30e06bf0..160c7570 100644
--- a/backend/apis/results.py
+++ b/backend/apis/results.py
@@ -1,11 +1,11 @@
 import json
 
-from apis.base import app, check_configs
 from bottle import abort, redirect, request, static_file
 from models.initial_config import InitialConfig
 from models.run import Run
 from packages.vcs.vcs import VCSFactory
+from apis.base import app, check_configs
 
 SUCCESS = "success"
 FAILURE = "failure"
@@ -16,8 +16,7 @@
 @app.route("/api/")
 @check_configs
 def home():
-    """Return repos and schedules which are running on the server.
- """ + """Return repos and schedules which are running on the server.""" configs = InitialConfig() result = {"repos": configs.repos} return json.dumps(result) @@ -57,8 +56,7 @@ def result(repo): @app.route("/status") @check_configs def status(): - """Returns repo's branch or schedule status for your version control system. - """ + """Returns repo's branch or schedule status for your version control system.""" repo = request.query.get("repo") branch = request.query.get("branch") result = request.query.get("result") # to return the run result diff --git a/backend/apis/trigger.py b/backend/apis/trigger.py index ec0f0ab6..3aa248a6 100644 --- a/backend/apis/trigger.py +++ b/backend/apis/trigger.py @@ -16,12 +16,12 @@ redis = Redis() q = Queue(connection=redis, name="zeroci") + @app.route("/git_trigger", method=["POST"]) @check_configs def git_trigger(): - """Trigger the test when a post request is sent from a repo's webhook. - """ - #TODO: make payload validation before work on it. + """Trigger the test when a post request is sent from a repo's webhook.""" + # TODO: make payload validation before work on it. configs = InitialConfig() if request.headers.get("Content-Type") == "application/json": job = "" @@ -37,7 +37,12 @@ def git_trigger(): committer = request.json["pusher"]["login"] branch_exist = not commit.startswith("000000") if branch_exist: - job = q.enqueue_call(trigger.enqueue, args=(repo, branch, commit, committer, "", None, False), result_ttl=5000, timeout=20000) + job = q.enqueue_call( + trigger.enqueue, + args=(repo, branch, commit, committer, "", None, False), + result_ttl=5000, + timeout=20000, + ) # pull case # TODO: Handle the request for gitea. @@ -48,7 +53,12 @@ def git_trigger(): target_branch = request.json["pull_request"]["base"]["ref"] commit = request.json["pull_request"]["head"]["sha"] committer = request.json["sender"]["login"] - job = q.enqueue_call(trigger.enqueue, args=(repo, branch, commit, committer, target_branch, None, False), result_ttl=5000, timeout=20000) + job = q.enqueue_call( + trigger.enqueue, + args=(repo, branch, commit, committer, target_branch, None, False), + result_ttl=5000, + timeout=20000, + ) if job: return HTTPResponse(job.get_id(), 201) return HTTPResponse("Nothing to be done", 200) @@ -70,7 +80,9 @@ def run_trigger(): return HTTPResponse( f"There is a running job for this run_id {run_id}, please try again after this run finishes", 503 ) - job = q.enqueue_call(trigger.enqueue, args=("", "", "", "", "", run_id, True), result_ttl=5000, timeout=20000) + job = q.enqueue_call( + trigger.enqueue, args=("", "", "", "", "", run_id, True), result_ttl=5000, timeout=20000 + ) if job: return HTTPResponse(job.get_id(), 200) @@ -86,7 +98,12 @@ def run_trigger(): f"There is a running job from this commit {last_commit}, please try again after this run finishes", 503 ) if last_commit: - job = q.enqueue_call(trigger.enqueue, args=(repo, branch, last_commit, committer, "", None, True), result_ttl=5000, timeout=20000) + job = q.enqueue_call( + trigger.enqueue, + args=(repo, branch, last_commit, committer, "", None, True), + result_ttl=5000, + timeout=20000, + ) else: return HTTPResponse(f"Couldn't get last commit from this branch {branch}, please try again", 503) if job: diff --git a/backend/deployment/container.py b/backend/deployment/container.py index 2224cb11..c76bd038 100644 --- a/backend/deployment/container.py +++ b/backend/deployment/container.py @@ -5,7 +5,6 @@ import paramiko import redis import yaml - from kubernetes import client, config from 
kubernetes.stream import stream from utils.utils import Utils @@ -220,7 +219,10 @@ def create_pod(self, env, prerequisites, repo_path): resources=resources, ) spec = client.V1PodSpec( - volumes=vols, containers=[test_container, helper_container], hostname=self.name, restart_policy="Never", + volumes=vols, + containers=[test_container, helper_container], + hostname=self.name, + restart_policy="Never", ) meta = client.V1ObjectMeta(name=self.name, namespace=self.namespace, labels={"app": self.name}) pod = client.V1Pod(api_version="v1", kind="Pod", metadata=meta, spec=spec) @@ -270,8 +272,7 @@ def wait_for_container(self): break def delete(self): - """Delete the container after finishing test. - """ + """Delete the container after finishing test.""" try: self.client.delete_namespaced_pod(name=self.name, namespace=self.namespace) self.client.delete_namespaced_service(name=self.name, namespace=self.namespace) diff --git a/backend/health/health_check.py b/backend/health/health_check.py index ac4fa549..dd391803 100644 --- a/backend/health/health_check.py +++ b/backend/health/health_check.py @@ -18,15 +18,13 @@ def get_process_pid(self, name): return pids def test_zeroci_server(self): - """Check zeroci server is still running - """ + """Check zeroci server is still running""" pid = self.get_process_pid("python3 zeroci") if not pid: recover.zeroci() def test_redis(self): - """Check redis is still running. - """ + """Check redis is still running.""" pid = self.get_process_pid("redis") if not pid: recover.redis() @@ -39,8 +37,7 @@ def test_redis(self): recover.redis() def test_workers(self): - """Check rq workers are up. - """ + """Check rq workers are up.""" pids = self.get_process_pid("python3 worker") workers = len(pids) if workers < 5: @@ -50,8 +47,7 @@ def test_workers(self): recover.worker(i) def test_zeroci_workers(self): - """Check rq workers are up. - """ + """Check rq workers are up.""" pids = self.get_process_pid("python3 zeroci_worker") zeroci_workers = len(pids) if zeroci_workers < 2: @@ -61,8 +57,7 @@ def test_zeroci_workers(self): recover.zeroci_worker(i) def test_schedule(self): - """Check rq schedule is up. 
- """ + """Check rq schedule is up.""" pid = self.get_process_pid("rqscheduler") if not pid: recover.scheduler() diff --git a/backend/health/health_recover.py b/backend/health/health_recover.py index 65495b61..db252256 100644 --- a/backend/health/health_recover.py +++ b/backend/health/health_recover.py @@ -16,7 +16,7 @@ def redis(self): def worker(self, wid): cmd = f"/bin/bash -c 'cd {PATH}; python3 worker{wid}.py &>> worker_{wid}.log &'" self.execute_cmd(cmd=cmd, timeout=TIMEOUT) - + def zeroci_worker(self, wid): cmd = f"/bin/bash -c 'cd {PATH}; python3 zeroci_worker{wid}.py &>> zeroci_worker_{wid}.log &'" self.execute_cmd(cmd=cmd, timeout=TIMEOUT) diff --git a/backend/models/run.py b/backend/models/run.py index 1029bbcc..4b1bd1ad 100644 --- a/backend/models/run.py +++ b/backend/models/run.py @@ -1,6 +1,8 @@ -from .base import Document, ModelFactory, fields, StoredFactory import uuid +from .base import Document, ModelFactory, StoredFactory, fields + + class RunModel(Document): timestamp = fields.Integer(required=True, indexed=True) repo = fields.String(required=True) diff --git a/backend/packages/vcs/vcs.py b/backend/packages/vcs/vcs.py index 4fbc302c..e7050489 100644 --- a/backend/packages/vcs/vcs.py +++ b/backend/packages/vcs/vcs.py @@ -3,11 +3,10 @@ from abc import ABCMeta, abstractmethod from urllib.parse import urljoin +import giteapy from github import Github as GH from github import UnknownObjectException from github.GithubException import GithubException - -import giteapy from models.initial_config import InitialConfig @@ -272,7 +271,12 @@ def _get_gitea_cl(): @VCSInterface.call_trial def status_send( - self, status, link, commit, description="ZeroCI for testing", context="continuous-integration/ZeroCI", + self, + status, + link, + commit, + description="ZeroCI for testing", + context="continuous-integration/ZeroCI", ): body = {"context": context, "description": description, "state": status, "target_url": link} self.repo_obj.repo_create_status(self.owner, self.repo_name, commit, body=body) diff --git a/backend/utils/utils.py b/backend/utils/utils.py index a952b248..ed03c817 100644 --- a/backend/utils/utils.py +++ b/backend/utils/utils.py @@ -89,7 +89,7 @@ def xml_parse(self, path, line): def load_file(self, path): """Load file content. - + :param path: path to file. 
:type path: str
         :return: file content
diff --git a/backend/zeroci.py
index 4d10051a..4ddcb05e 100644
--- a/backend/zeroci.py
+++ b/backend/zeroci.py
@@ -8,7 +8,6 @@
 import apis.config
 import apis.login
 import apis.results
-# import apis.schedule
 import apis.trigger
 import apis.websockets
 import apis.default
From 80ad0c71bb51c2ccff8853aa25f9bceece21d5cd Mon Sep 17 00:00:00 2001
From: ahmedhanafy725
Date: Wed, 23 Dec 2020 12:35:39 +0200
Subject: [PATCH 10/13] Put all constants in one place

---
 backend/actions/reporter.py           | 10 ++---
 backend/actions/runner.py             | 60 +++++++--------------
 backend/actions/trigger.py            | 22 ++++------
 backend/apis/base.py                  |  3 +-
 backend/apis/default.py               |  5 ++-
 backend/apis/login.py                 |  6 +--
 backend/apis/results.py               |  6 +--
 backend/apis/trigger.py               |  6 +--
 backend/apis/websockets.py            |  7 ++--
 backend/deployment/container.py       |  5 ++-
 backend/health/cleanup.py             |  8 ++--
 backend/health/health_check.py        |  2 +-
 backend/packages/telegram/telegram.py |  4 +-
 backend/packages/vcs/vcs.py           |  7 +++-
 backend/utils/constants.py            | 14 +++++++
 15 files changed, 68 insertions(+), 97 deletions(-)

diff --git a/backend/actions/reporter.py b/backend/actions/reporter.py
index f3c6c33f..78168d98 100644
--- a/backend/actions/reporter.py
+++ b/backend/actions/reporter.py
@@ -1,15 +1,13 @@
 import json
 from urllib.parse import urljoin
 
-from redis import Redis
-
 from models.initial_config import InitialConfig
 from packages.telegram.telegram import Telegram
 from packages.vcs.vcs import VCSFactory
+from redis import Redis
+from utils.constants import FAILURE, SUCCESS
 
-r = Redis()
-SUCCESS = "success"
-FAILURE = "failure"
+redis = Redis()
 
 
 class Reporter:
@@ -46,7 +44,7 @@ def report(self, run_id, run_obj):
             "triggered_by": triggered_by,
             "run_id": run_id,
         }
-        r.publish("zeroci_status", json.dumps(data))
+        redis.publish("zeroci_status", json.dumps(data))
         vcs_obj = VCSFactory().get_cvn(repo=run_obj.repo)
         vcs_obj.status_send(status=run_obj.status, link=link, commit=run_obj.commit)
         telegram.send_msg(
diff --git a/backend/actions/runner.py b/backend/actions/runner.py
index 87c8a8a2..6ab02873 100644
--- a/backend/actions/runner.py
+++ b/backend/actions/runner.py
@@ -4,7 +4,6 @@
 from datetime import datetime
 from urllib.parse import urljoin
 
-import redis
 import requests
 import yaml
 from deployment.container import Container
@@ -13,6 +12,8 @@
 from models.run import Run
 from models.run_config import RunConfig
 from packages.vcs.vcs import VCSFactory
+from redis import Redis
+from utils.constants import *
 from utils.utils import Utils
 
 from actions.reporter import Reporter
@@ -21,20 +22,10 @@
 container = Container()
 reporter = Reporter()
 utils = Utils()
-r = redis.Redis()
-
-SUCCESS = "success"
-FAILURE = "failure"
-ERROR = "error"
-PENDING = "pending"
-LOG_TYPE = "log"
-TESTSUITE_TYPE = "testsuite"
-NEPH_TYPE = "neph"
+redis = Redis()
 
 
 class Runner:
-    _REPOS_DIR = "/zeroci/code/vcs_repos"
-    _BIN_DIR = "/zeroci/bin/"
     run_id = None
     run_obj = None
 
     def test_run(self, job):
@@ -76,7 +67,7 @@ def neph_run(self, job_name, line):
         working_dir = line["working_dir"]
         yaml_path = line["yaml_path"]
         neph_id = f"{self.run_id}:{job_name}:{line['name']}"
-        cmd = f"export NEPH_RUN_ID='{neph_id}' \n cd {working_dir} \n /zeroci/bin/neph -y {yaml_path} -m CI"
+        cmd = f"export NEPH_RUN_ID='{neph_id}' \n cd {working_dir} \n {BIN_DIR}neph -y {yaml_path} -m CI"
         response = container.execute_command(cmd=cmd, run_id=self.run_id)
         if response.returncode:
             status = FAILURE
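        # The job-level neph log is appended to the run result first; the
        # per-task logs that neph pushed to redis under "neph:<run_id>:..."
        # keys are collected right after.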
self.run_obj.result.append({"type": LOG_TYPE, "status": status, "name": name, "content": response.stdout}) self.run_obj.save() - for key in r.keys(): + for key in redis.keys(): key = key.decode() if key.startswith(f"neph:{self.run_id}:{job_name}:{line['name']}"): status = SUCCESS - logs = r.lrange(key, 0, -1) + logs = redis.lrange(key, 0, -1) all_logs = "" for log in logs: log = json.loads(log.decode()) @@ -116,7 +107,7 @@ def build(self, job, clone_details, job_number): if response.returncode: name = "{job_name}: Clone Repository".format(job_name=job["name"]) result = response.stdout - r.rpush(self.run_id, result) + redis.rpush(self.run_id, result) else: response = container.execute_command(cmd=job["install"], run_id=self.run_id) if response.returncode: @@ -127,7 +118,7 @@ def build(self, job, clone_details, job_number): else: name = "{job_name}: Deploy".format(job_name=job["name"]) result = "Couldn't deploy a container" - r.rpush(self.run_id, result) + redis.rpush(self.run_id, result) if not installed: self.run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": name, "content": result}) @@ -158,7 +149,7 @@ def _get_run_env(self): def repo_clone_details(self): """Clone repo.""" configs = InitialConfig() - repo_remote_path = os.path.join(self._REPOS_DIR, self.run_obj.repo) + repo_remote_path = os.path.join(REPOS_DIR, self.run_obj.repo) clone_url = urljoin(configs.vcs_host, f"{self.run_obj.repo}.git") cmd = """git clone {clone_url} {repo_remote_path} --branch {branch} cd {repo_remote_path} @@ -175,7 +166,7 @@ def repo_clone_details(self): def _prepare_bin_dirs(self, bin_remote_path): self.bin_name = bin_remote_path.split(os.path.sep)[-1] release = self.run_obj.commit[:7] - local_path = os.path.join(self._BIN_DIR, self.run_obj.repo, self.run_obj.branch) + local_path = os.path.join(BIN_DIR, self.run_obj.repo, self.run_obj.branch) bin_release = f"{self.bin_name}_{release}" bin_local_path = os.path.join(local_path, bin_release) if not os.path.exists(local_path): @@ -187,7 +178,7 @@ def _get_bin(self, bin_remote_path, job_number): if bin_remote_path and not job_number: bin_local_path = self._prepare_bin_dirs(bin_remote_path) bin_release = bin_local_path.split(os.path.sep)[-1] - bin_tmp_path = os.path.join(self._BIN_DIR, bin_release) + bin_tmp_path = os.path.join(BIN_DIR, bin_release) cmd = f"cp {bin_remote_path} {bin_tmp_path}" container.execute_command(cmd=cmd, run_id="", verbose=False) container.ssh_get_remote_file(remote_path=bin_tmp_path, local_path=bin_local_path) @@ -199,7 +190,7 @@ def _get_bin(self, bin_remote_path, job_number): def _set_bin(self): if self.run_obj.bin_release: bin_local_path = self._prepare_bin_dirs(self.bin_name) - bin_remote_path = os.path.join(self._BIN_DIR, self.bin_name) + bin_remote_path = os.path.join(BIN_DIR, self.bin_name) container.ssh_set_remote_file(remote_path=bin_remote_path, local_path=bin_local_path) container.ssh_command(f"chmod +x {bin_remote_path}") @@ -227,36 +218,13 @@ def build_and_test(self, run_id, repo_config): ).replace( " ", "" ) - r.rpush(self.run_id, log) + redis.rpush(self.run_id, log) deployed, installed = self.build(job=job, clone_details=clone_details, job_number=i) if deployed: if installed: worked = self.test_run(job=job) self._get_bin(bin_remote_path=job.get("bin_path"), job_number=i) container.delete() - r.rpush(self.run_id, "hamada ok") + redis.rpush(self.run_id, "hamada ok") self.cal_status() reporter.report(run_id=self.run_id, run_obj=self.run_obj) - - # def schedule_run(self, job): - # """Builds, runs tests, calculates 
status and gives report on telegram. - - # :param schedule_name: the name of the scheduled run. - # :type schedule_name: str - # :param script: the script that should run your schedule. - # :type script: str - # """ - # triggered_by = job.get("triggered_by", "ZeroCI Scheduler") - # data = { - # "status": PENDING, - # "timestamp": int(datetime.now().timestamp()), - # "schedule_name": job["schedule_name"], - # "triggered_by": triggered_by, - # "bin_release": None, - # } - # scheduler_run = SchedulerRun(**data) - # scheduler_run.save() - # id = str(scheduler_run.id) - # data["id"] = id - # r.publish("zeroci_status", json.dumps(data)) - # self.build_and_test(id=id, schedule_name=job["schedule_name"], script=job) diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py index 6d513051..f9d82081 100644 --- a/backend/actions/trigger.py +++ b/backend/actions/trigger.py @@ -12,6 +12,7 @@ from redis import Redis from rq import Queue from rq_scheduler import Scheduler +from utils.constants import BIN_DIR, ERROR, LOG_TYPE, PENDING from actions.reporter import Reporter from actions.runner import Runner @@ -19,17 +20,9 @@ reporter = Reporter() runner = Runner() -scheduler = Scheduler(connection=Redis()) - redis = Redis() +scheduler = Scheduler(connection=redis) q = Queue(connection=redis, name="default") -PENDING = "pending" -ERROR = "error" - - -ERROR = "error" -LOG_TYPE = "log" -BIN_DIR = "/zeroci/bin/" class Trigger: @@ -115,8 +108,7 @@ def enqueue(self, repo="", branch="", commit="", committer="", target_branch="", def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_id=None, triggered=False): configs = InitialConfig() - status = PENDING - timestamp = datetime.now().timestamp() + timestamp = int(datetime.now().timestamp()) if run_id: # Triggered from id. 
run = Run.get(run_id=run_id) @@ -125,15 +117,15 @@ def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_i "timestamp": timestamp, "commit": run.commit, "committer": run.committer, - "status": status, + "status": PENDING, "repo": run.repo, "branch": run.branch, "triggered_by": triggered_by, "bin_release": None, "run_id": run_id, } - run.timestamp = int(timestamp) - run.status = status + run.timestamp = timestamp + run.status = PENDING run.result = [] run.triggered_by = triggered_by if run.bin_release: @@ -156,7 +148,7 @@ def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_i "timestamp": timestamp, "commit": commit, "committer": committer, - "status": status, + "status": PENDING, "repo": repo, "branch": branch, "triggered_by": triggered_by, diff --git a/backend/apis/base.py b/backend/apis/base.py index 5717de4e..d484be7f 100644 --- a/backend/apis/base.py +++ b/backend/apis/base.py @@ -1,6 +1,7 @@ +from functools import wraps + from bottle import Bottle, abort, redirect, request, response from models.initial_config import InitialConfig -from functools import wraps app = Bottle() LOGIN_URL = "/auth/login?provider=3bot" diff --git a/backend/apis/default.py b/backend/apis/default.py index ee59cf26..18c1c25f 100644 --- a/backend/apis/default.py +++ b/backend/apis/default.py @@ -1,7 +1,8 @@ -from apis.base import app, check_configs, admin from bottle import static_file +from utils.constants import BIN_DIR + +from apis.base import admin, app, check_configs -BIN_DIR = "/zeroci/bin/" STATIC_DIR = "../dist/static" INDEX_DIR = "../dist" diff --git a/backend/apis/login.py b/backend/apis/login.py index 10fc0735..4a09d869 100644 --- a/backend/apis/login.py +++ b/backend/apis/login.py @@ -4,13 +4,13 @@ import nacl import requests +from bottle import abort, redirect, request +from models.initial_config import InitialConfig from nacl.public import Box from nacl.signing import VerifyKey +from utils.utils import Utils from apis.base import app -from bottle import abort, redirect, request -from models.initial_config import InitialConfig -from utils.utils import Utils CALLBACK_URL = "/auth/3bot_callback" REDIRECT_URL = "https://login.threefold.me" diff --git a/backend/apis/results.py b/backend/apis/results.py index 160c7570..55293df4 100644 --- a/backend/apis/results.py +++ b/backend/apis/results.py @@ -4,14 +4,10 @@ from models.initial_config import InitialConfig from models.run import Run from packages.vcs.vcs import VCSFactory +from utils.constants import ERROR, FAILURE, PENDING, SUCCESS from apis.base import app, check_configs -SUCCESS = "success" -FAILURE = "failure" -ERROR = "error" -PENDING = "pending" - @app.route("/api/") @check_configs diff --git a/backend/apis/trigger.py b/backend/apis/trigger.py index 3aa248a6..93252010 100644 --- a/backend/apis/trigger.py +++ b/backend/apis/trigger.py @@ -7,14 +7,12 @@ from packages.vcs.vcs import VCSFactory from redis import Redis from rq import Queue +from utils.constants import PENDING from apis.base import app, check_configs, user trigger = Trigger() - -PENDING = "pending" -redis = Redis() -q = Queue(connection=redis, name="zeroci") +q = Queue(connection=Redis(), name="zeroci") @app.route("/git_trigger", method=["POST"]) diff --git a/backend/apis/websockets.py b/backend/apis/websockets.py index 2f5f915d..9a6d64c9 100644 --- a/backend/apis/websockets.py +++ b/backend/apis/websockets.py @@ -1,10 +1,11 @@ -from gevent import sleep -from redis import Redis import json -from apis.base import app from bottle import 
abort, request +from gevent import sleep from geventwebsocket import WebSocketError +from redis import Redis + +from apis.base import app redis = Redis() diff --git a/backend/deployment/container.py b/backend/deployment/container.py index c76bd038..691d6542 100644 --- a/backend/deployment/container.py +++ b/backend/deployment/container.py @@ -7,6 +7,7 @@ import yaml from kubernetes import client, config from kubernetes.stream import stream +from utils.constants import BIN_DIR from utils.utils import Utils TIMEOUT = 120 @@ -167,7 +168,7 @@ def get_remote_file(self, remote_path, local_path): def create_pod(self, env, prerequisites, repo_path): # zeroci vol - bin_mount_path = "/zeroci/bin" + bin_mount_path = BIN_DIR bin_vol_name = "bin-path" bin_vol = client.V1Volume(name=bin_vol_name, empty_dir={}) bin_vol_mount = client.V1VolumeMount(mount_path=bin_mount_path, name=bin_vol_name) @@ -210,7 +211,7 @@ def create_pod(self, env, prerequisites, repo_path): command=[ "/bin/sh", "-ce", - f"echo {ssh_key} > /root/.ssh/authorized_keys && cp /usr/local/bin/* /zeroci/bin/ \ + f"echo {ssh_key} > /root/.ssh/authorized_keys && cp /usr/local/bin/* {BIN_DIR} \ && service ssh start && sleep 3600", ], env=[non_interactive], diff --git a/backend/health/cleanup.py b/backend/health/cleanup.py index 776a5643..5bb47504 100644 --- a/backend/health/cleanup.py +++ b/backend/health/cleanup.py @@ -5,19 +5,17 @@ from datetime import datetime from pathlib import Path -from redis import Redis - from models.base import StoredFactory from models.scheduler_run import SchedulerRun from models.trigger_run import TriggerRun - +from redis import Redis REDIS_PATH = "/var/lib/redis" WHOOSH_PATH = "/root/.config/jumpscale/whoosh_indexes/" def remove(factory, days=30): - r = Redis() + redis = Redis() names = factory.list_all() for name in names: obj = factory.get(name) @@ -26,7 +24,7 @@ def remove(factory, days=30): time_diff = now_time - run_time if time_diff.days > days: factory.delete(name) - r.delete(obj.run_id) + redis.delete(obj.run_id) def get_size_in_giga_bytes(path): diff --git a/backend/health/health_check.py b/backend/health/health_check.py index dd391803..e60efa25 100644 --- a/backend/health/health_check.py +++ b/backend/health/health_check.py @@ -3,9 +3,9 @@ sys.path.append("/sandbox/code/github/threefoldtech/zeroCI/backend") from redis import Redis +from utils.utils import Utils from health_recover import Recover -from utils.utils import Utils recover = Recover() diff --git a/backend/packages/telegram/telegram.py b/backend/packages/telegram/telegram.py index 1f22a476..584f8922 100644 --- a/backend/packages/telegram/telegram.py +++ b/backend/packages/telegram/telegram.py @@ -1,10 +1,10 @@ import time from urllib.parse import urljoin -from telegram import Bot, ParseMode, InlineKeyboardMarkup, InlineKeyboardButton - from models.initial_config import InitialConfig +from telegram import Bot, InlineKeyboardButton, InlineKeyboardMarkup, ParseMode + RETRIES = 5 diff --git a/backend/packages/vcs/vcs.py b/backend/packages/vcs/vcs.py index e7050489..d9e54cd1 100644 --- a/backend/packages/vcs/vcs.py +++ b/backend/packages/vcs/vcs.py @@ -217,7 +217,7 @@ def create_hook(self, repo): repo = self.github_cl.get_repo(repo) hook_config = {"url": self.HOOK_URL, "content_type": "json"} try: - repo.create_hook(name="web", config=hook_config, events=["push"], active=True) + repo.create_hook(name="web", config=hook_config, events=["push", "pull_request"], active=True) except (UnknownObjectException, GithubException) as e: if e.status in [404, 
403]: return False @@ -328,7 +328,10 @@ def create_hook(self, repo): return True config = giteapy.CreateHookOption( - active=True, config={"url": self.HOOK_URL, "content_type": "json"}, events=["push"], type="gitea" + active=True, + config={"url": self.HOOK_URL, "content_type": "json"}, + events=["push", "pull_request"], + type="gitea", ) try: self.repo_obj.repo_create_hook(owner, repo_name, body=config) diff --git a/backend/utils/constants.py b/backend/utils/constants.py index e69de29b..3ec5ecc0 100644 --- a/backend/utils/constants.py +++ b/backend/utils/constants.py @@ -0,0 +1,14 @@ +# Status +PENDING = "pending" +SUCCESS = "success" +FAILURE = "failure" +ERROR = "error" + +# Result types +LOG_TYPE = "log" +TESTSUITE_TYPE = "testsuite" +NEPH_TYPE = "neph" + +# Dirs +BIN_DIR = "/zeroci/bin/" +REPOS_DIR = "/zeroci/code/vcs_repos" From 519748b96839a581663c1cc540a52bc795ac4d6f Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Wed, 23 Dec 2020 16:15:04 +0200 Subject: [PATCH 11/13] Cleanup the usage --- backend/actions/reporter.py | 34 ++- backend/actions/runner.py | 101 +++++---- backend/actions/trigger.py | 220 ++++++++++-------- backend/actions/validator.py | 391 ++++++++++++++++---------------- backend/apis/login.py | 5 +- backend/apis/trigger.py | 10 +- backend/deployment/container.py | 82 ++++--- backend/packages/vcs/vcs.py | 17 +- backend/utils/utils.py | 7 +- 9 files changed, 481 insertions(+), 386 deletions(-) diff --git a/backend/actions/reporter.py b/backend/actions/reporter.py index 78168d98..ec45c5df 100644 --- a/backend/actions/reporter.py +++ b/backend/actions/reporter.py @@ -7,10 +7,31 @@ from redis import Redis from utils.constants import FAILURE, SUCCESS -redis = Redis() - class Reporter: + def __init__(self): + self._redis = None + self._telegram = None + self._vcs = None + + @property + def redis(self): + if not self._redis: + self._redis = Redis() + return self._redis + + @property + def telegram(self): + if not self._telegram: + self._telegram = Telegram() + return self._telegram + + @property + def vcs(self): + if not self._vcs: + self._vcs = VCSFactory().get_cvn() + return self._vcs + def report(self, run_id, run_obj): """Report the result to the commit status and Telegram chat. 
@@ -22,7 +43,6 @@ def report(self, run_id, run_obj): :param schedule_name: str """ configs = InitialConfig() - telegram = Telegram() bin_release = run_obj.bin_release triggered_by = run_obj.triggered_by msg = self.report_msg(status=run_obj.status) @@ -44,10 +64,10 @@ def report(self, run_id, run_obj): "triggered_by": triggered_by, "run_id": run_id, } - redis.publish("zeroci_status", json.dumps(data)) - vcs_obj = VCSFactory().get_cvn(repo=run_obj.repo) - vcs_obj.status_send(status=run_obj.status, link=link, commit=run_obj.commit) - telegram.send_msg( + self.redis.publish("zeroci_status", json.dumps(data)) + self.vcs._set_repo_obj(repo=run_obj.repo) + self.vcs.status_send(status=run_obj.status, link=link, commit=run_obj.commit) + self.telegram.send_msg( msg=msg, link=link, repo=run_obj.repo, diff --git a/backend/actions/runner.py b/backend/actions/runner.py index 6ab02873..7cf9ed0b 100644 --- a/backend/actions/runner.py +++ b/backend/actions/runner.py @@ -19,37 +19,62 @@ from actions.reporter import Reporter from actions.validator import Validator -container = Container() -reporter = Reporter() -utils = Utils() -redis = Redis() - class Runner: - run_id = None - run_obj = None - - def test_run(self, job): + def __init__(self, run=None, run_id=None): + self.run_id = run_id + self.run_obj = run + self._redis = None + self._container = None + self._reporter = None + self._utils = None + + @property + def redis(self): + if not self._redis: + self._redis = Redis() + return self._redis + + @property + def container(self): + if not self._container: + self._container = Container() + return self._container + + @property + def reporter(self): + if not self._reporter: + self._reporter = Reporter() + return self._reporter + + @property + def utils(self): + if not self._utils: + self._utils = Utils() + return self._utils + + + def _test_run(self, job): """Runs tests and store the result in DB.""" for line in job["script"]: if line.get("type") == "neph": - finished = self.neph_run(job_name=job["name"], line=line) + finished = self._neph_run(job_name=job["name"], line=line) else: - finished = self.normal_run(job_name=job["name"], line=line) + finished = self._normal_run(job_name=job["name"], line=line) if not finished: return False return True - def normal_run(self, job_name, line): + def _normal_run(self, job_name, line): status = SUCCESS - response, file_path = container.run_test(run_id=self.run_id, run_cmd=line["cmd"]) + response, file_path = self.container.run_test(run_id=self.run_id, run_cmd=line["cmd"]) result = response.stdout type = LOG_TYPE if response.returncode: status = FAILURE if file_path: try: - result = utils.xml_parse(path=file_path, line=line["cmd"]) + result = self.utils.xml_parse(path=file_path, line=line["cmd"]) type = TESTSUITE_TYPE except: pass @@ -62,13 +87,13 @@ def normal_run(self, job_name, line): return False return True - def neph_run(self, job_name, line): + def _neph_run(self, job_name, line): status = SUCCESS working_dir = line["working_dir"] yaml_path = line["yaml_path"] neph_id = f"{self.run_id}:{job_name}:{line['name']}" cmd = f"export NEPH_RUN_ID='{neph_id}' \n cd {working_dir} \n {BIN_DIR}neph -y {yaml_path} -m CI" - response = container.execute_command(cmd=cmd, run_id=self.run_id) + response = self.container.execute_on_test_container(cmd=cmd, run_id=self.run_id) if response.returncode: status = FAILURE @@ -76,11 +101,11 @@ def neph_run(self, job_name, line): self.run_obj.result.append({"type": LOG_TYPE, "status": status, "name": name, "content": response.stdout}) 
self.run_obj.save() - for key in redis.keys(): + for key in self.redis.keys(): key = key.decode() if key.startswith(f"neph:{self.run_id}:{job_name}:{line['name']}"): status = SUCCESS - logs = redis.lrange(key, 0, -1) + logs = self.redis.lrange(key, 0, -1) all_logs = "" for log in logs: log = json.loads(log.decode()) @@ -95,21 +120,21 @@ def neph_run(self, job_name, line): return False return True - def build(self, job, clone_details, job_number): + def _build(self, job, clone_details, job_number): """Create VM with the required prerequisties and run installation steps to get it ready for running tests.""" env = self._get_run_env() - deployed = container.deploy(env=env, prerequisites=job["prerequisites"], repo_path=clone_details["remote_path"]) + deployed = self.container.deploy(env=env, prerequisites=job["prerequisites"], repo_path=clone_details["remote_path"]) installed = False if deployed: if job_number != 0: self._set_bin() - response = container.ssh_command(cmd=clone_details["cmd"]) + response = self.container.execute_on_helper(cmd=clone_details["cmd"]) if response.returncode: name = "{job_name}: Clone Repository".format(job_name=job["name"]) result = response.stdout - redis.rpush(self.run_id, result) + self.redis.rpush(self.run_id, result) else: - response = container.execute_command(cmd=job["install"], run_id=self.run_id) + response = self.container.execute_on_test_container(cmd=job["install"], run_id=self.run_id) if response.returncode: name = "{job_name}: Installation".format(job_name=job["name"]) result = response.stdout @@ -118,7 +143,7 @@ def build(self, job, clone_details, job_number): else: name = "{job_name}: Deploy".format(job_name=job["name"]) result = "Couldn't deploy a container" - redis.rpush(self.run_id, result) + self.redis.rpush(self.run_id, result) if not installed: self.run_obj.result.append({"type": LOG_TYPE, "status": ERROR, "name": name, "content": result}) @@ -126,7 +151,7 @@ def build(self, job, clone_details, job_number): return deployed, installed - def cal_status(self): + def _cal_status(self): """Calculate the status of the whole tests result has been stored on the BD's id.""" status = SUCCESS for result in self.run_obj.result: @@ -146,7 +171,7 @@ def _get_run_env(self): env.append(env_var) return env - def repo_clone_details(self): + def _repo_clone_details(self): """Clone repo.""" configs = InitialConfig() repo_remote_path = os.path.join(REPOS_DIR, self.run_obj.repo) @@ -180,8 +205,8 @@ def _get_bin(self, bin_remote_path, job_number): bin_release = bin_local_path.split(os.path.sep)[-1] bin_tmp_path = os.path.join(BIN_DIR, bin_release) cmd = f"cp {bin_remote_path} {bin_tmp_path}" - container.execute_command(cmd=cmd, run_id="", verbose=False) - container.ssh_get_remote_file(remote_path=bin_tmp_path, local_path=bin_local_path) + self.container.execute_on_test_container(cmd=cmd, run_id="", verbose=False) + self.container.get_remote_file_from_helper(remote_path=bin_tmp_path, local_path=bin_local_path) if os.path.exists(bin_local_path): self.run_obj.bin_release = bin_release @@ -191,8 +216,8 @@ def _set_bin(self): if self.run_obj.bin_release: bin_local_path = self._prepare_bin_dirs(self.bin_name) bin_remote_path = os.path.join(BIN_DIR, self.bin_name) - container.ssh_set_remote_file(remote_path=bin_remote_path, local_path=bin_local_path) - container.ssh_command(f"chmod +x {bin_remote_path}") + self.container.set_remote_file_on_helper(remote_path=bin_remote_path, local_path=bin_local_path) + self.container.execute_on_helper(f"chmod +x {bin_remote_path}") def 
build_and_test(self, run_id, repo_config): """Builds, runs tests, calculates status and gives report on telegram and your version control system. @@ -204,7 +229,7 @@ def build_and_test(self, run_id, repo_config): """ self.run_id = run_id self.run_obj = Run.get(run_id=self.run_id) - clone_details = self.repo_clone_details() + clone_details = self._repo_clone_details() worked = deployed = installed = True for i, job in enumerate(repo_config["jobs"]): if not (worked and deployed and installed): @@ -218,13 +243,13 @@ def build_and_test(self, run_id, repo_config): ).replace( " ", "" ) - redis.rpush(self.run_id, log) - deployed, installed = self.build(job=job, clone_details=clone_details, job_number=i) + self.redis.rpush(self.run_id, log) + deployed, installed = self._build(job=job, clone_details=clone_details, job_number=i) if deployed: if installed: - worked = self.test_run(job=job) + worked = self._test_run(job=job) self._get_bin(bin_remote_path=job.get("bin_path"), job_number=i) - container.delete() - redis.rpush(self.run_id, "hamada ok") - self.cal_status() - reporter.report(run_id=self.run_id, run_obj=self.run_obj) + self.container.delete() + self.redis.rpush(self.run_id, "hamada ok") + self._cal_status() + self.reporter.report(run_id=self.run_id, run_obj=self.run_obj) diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py index f9d82081..91b3b8c6 100644 --- a/backend/actions/trigger.py +++ b/backend/actions/trigger.py @@ -16,19 +16,57 @@ from actions.reporter import Reporter from actions.runner import Runner -from actions.validator import Validator - -reporter = Reporter() -runner = Runner() -redis = Redis() -scheduler = Scheduler(connection=redis) -q = Queue(connection=redis, name="default") +from actions.validator import validate_yaml class Trigger: + def __init__(self): + self._redis = None + self._runner = None + self._reporter = None + self._queue = None + self._scheduler = None + self._vcs = None + + @property + def redis(self): + if not self._redis: + self._redis = Redis() + return self._redis + + @property + def runner(self): + if not self._runner: + self._runner = Runner() + return self._runner + + @property + def reporter(self): + if not self._reporter: + self._reporter = Reporter() + return self._reporter + + @property + def queue(self): + if not self._queue: + self._queue = Queue(connection=self.redis, name="default") + return self._queue + + @property + def scheduler(self): + if not self._scheduler: + self._scheduler = Scheduler(connection=self.redis) + return self._scheduler + + @property + def vcs(self): + if not self._vcs: + self._vcs = VCSFactory().get_cvn() + return self._vcs + def _load_config(self, repo, commit): - vcs_obj = VCSFactory().get_cvn(repo=repo) - script = vcs_obj.get_content(ref=commit, file_path="zeroCI.yaml") + self.vcs._set_repo_obj(repo=repo) + script = self.vcs.get_content(ref=commit, file_path="zeroCI.yaml") if script: try: config = yaml.safe_load(script) @@ -40,7 +78,7 @@ def _load_config(self, repo, commit): return False, "", msg - def enqueue(self, repo="", branch="", commit="", committer="", target_branch="", run_id=None, triggered=False): + def _load_validate_config(self, repo="", branch="", commit="", committer="", run_id=None, triggered=False, triggered_by=None): if run_id: run = Run.get(run_id=run_id) repo = run.repo @@ -48,71 +86,26 @@ def enqueue(self, repo="", branch="", commit="", committer="", target_branch="", status, config, msg = self._load_config(repo, commit) if not status: run, run_id = self._prepare_run_object( - 
repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered + repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered, triggered_by=triggered_by ) - return self._report(msg, run, run_id) - validator = Validator() - valid, msg = validator.validate_yaml(config) + self._report(msg, run, run_id) + return False + valid, msg = validate_yaml(config) if not valid: run, run_id = self._prepare_run_object( - repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered + repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered, triggered_by=triggered_by ) - return self._report(msg, run, run_id) - - if run_id: - run, run_id = self._prepare_run_object(run_id=run_id, triggered=triggered) - return self._trigger(repo_config=config, run=run, run_id=run_id) - - push = config["run_on"].get("push") - pull_request = config["run_on"].get("pull_request") - manual = config["run_on"].get("manual") - schedule = config["run_on"].get("schedule") - - if repo and branch and not schedule: - schedule_name = f"{repo}_{branch}" - scheduler.cancel(schedule_name) - if push: - trigger_branches = push["branches"] - if branch and branch in trigger_branches: - run, run_id = self._prepare_run_object( - repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered - ) - return self._trigger(repo_config=config, run=run, run_id=run_id) - if pull_request: - target_branches = pull_request["branches"] - if target_branch and target_branch in target_branches: - run, run_id = self._prepare_run_object( - repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered - ) - return self._trigger(repo_config=config, run=run, run_id=run_id) - if manual and triggered: - trigger_branches = manual["branches"] - if branch and branch in trigger_branches: - run, run_id = self._prepare_run_object( - repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered - ) - return self._trigger(repo_config=config, run=run, run_id=run_id) - if schedule: - schedule_branch = schedule["branch"] - cron = schedule["cron"] - schedule_name = f"{repo}_{branch}" - if branch == schedule_branch: - scheduler.cron( - cron_string=cron, - func=self._trigger_schedule, - args=[repo, branch], - id=schedule_name, - timeout=-1, - ) - return + self._report(msg, run, run_id) + return False + return config - def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_id=None, triggered=False): + def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_id=None, triggered=False, triggered_by=None): configs = InitialConfig() timestamp = int(datetime.now().timestamp()) if run_id: # Triggered from id. run = Run.get(run_id=run_id) - triggered_by = request.environ.get("beaker.session").get("username").strip(".3bot") + triggered_by = triggered_by or request.environ.get("beaker.session").get("username").strip(".3bot") data = { "timestamp": timestamp, "commit": run.commit, @@ -134,16 +127,16 @@ def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_i os.remove(bin_path) run.bin_release = None run.save() - for key in redis.keys(): + for key in self.redis.keys(): if run_id in key.decode(): - redis.delete(key) - redis.publish("zeroci_status", json.dumps(data)) + self.redis.delete(key) + self.redis.publish("zeroci_status", json.dumps(data)) else: # Triggered from vcs webhook or rebuild using the button. 
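# Both branches of _prepare_run_object end by publishing the run's state as
# JSON on the "zeroci_status" Redis channel, which the websocket API relays to
# the frontend. A minimal standalone subscriber for that channel (a sketch
# that assumes only redis-py and the payload keys built in this method):
#
#     import json
#     from redis import Redis
#
#     pubsub = Redis().pubsub()
#     pubsub.subscribe("zeroci_status")
#     for message in pubsub.listen():
#         if message["type"] == "message":
#             update = json.loads(message["data"])
#             print(update["repo"], update["branch"], update["status"])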
if repo in configs.repos: - triggered_by = "VCS Hook" + triggered_by = triggered_by or "VCS Hook" if triggered: - triggered_by = request.environ.get("beaker.session").get("username").strip(".3bot") + triggered_by = triggered_by or request.environ.get("beaker.session").get("username").strip(".3bot") data = { "timestamp": timestamp, "commit": commit, @@ -158,48 +151,91 @@ def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_i run.save() run_id = str(run.run_id) data["run_id"] = run_id - redis.publish("zeroci_status", json.dumps(data)) + self.redis.publish("zeroci_status", json.dumps(data)) if run and run_id: return run, run_id return None, None + def enqueue(self, repo="", branch="", commit="", committer="", target_branch="", run_id=None, triggered=False): + config = self._load_validate_config(repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered) + if not config: + return + + if run_id: + run, run_id = self._prepare_run_object(run_id=run_id, triggered=triggered) + return self._trigger(repo_config=config, run=run, run_id=run_id) + + push = config["run_on"].get("push") + pull_request = config["run_on"].get("pull_request") + manual = config["run_on"].get("manual") + schedule = config["run_on"].get("schedule") + + if repo and branch and not schedule: + schedule_name = f"{repo}_{branch}" + self.scheduler.cancel(schedule_name) + if push: + trigger_branches = push["branches"] + if branch and branch in trigger_branches: + run, run_id = self._prepare_run_object( + repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered + ) + return self._trigger(repo_config=config, run=run, run_id=run_id) + if pull_request: + target_branches = pull_request["branches"] + if target_branch and target_branch in target_branches: + run, run_id = self._prepare_run_object( + repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered + ) + return self._trigger(repo_config=config, run=run, run_id=run_id) + if manual and triggered: + trigger_branches = manual["branches"] + if branch and branch in trigger_branches: + run, run_id = self._prepare_run_object( + repo=repo, branch=branch, commit=commit, committer=committer, triggered=triggered + ) + return self._trigger(repo_config=config, run=run, run_id=run_id) + if schedule: + schedule_branch = schedule["branch"] + cron = schedule["cron"] + schedule_name = f"{repo}_{branch}" + if branch == schedule_branch: + self.scheduler.cron( + cron_string=cron, + func=self._trigger_schedule, + args=[repo, branch], + id=schedule_name, + timeout=-1, + ) + def _trigger(self, repo_config, run, run_id): if run and run_id: configs = InitialConfig() link = f"{configs.domain}/repos/{run.repo}/{run.branch}/{str(run.run_id)}" - vcs_obj = VCSFactory().get_cvn(repo=run.repo) - vcs_obj.status_send(status=PENDING, link=link, commit=run.commit) + self.vcs._set_repo_obj(repo=run.repo) + self.vcs.status_send(status=PENDING, link=link, commit=run.commit) # TODO: before triggering, check that there is not a run with same commit and in state pending. 
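# One way to implement that TODO, reusing the Run.get_objects query that
# _trigger_schedule below already performs (a sketch; it assumes the same
# filter semantics, since the Run model itself is outside this diff):
#
#     where = {"repo": run.repo, "commit": run.commit, "status": PENDING}
#     if Run.get_objects(fields=["status"], **where):
#         return  # a run for this commit is already pending, skip re-queueing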
-        job = q.enqueue_call(func=runner.build_and_test, args=(run_id, repo_config), result_ttl=5000, timeout=20000)
+        job = self.queue.enqueue_call(func=self.runner.build_and_test, args=(run_id, repo_config), result_ttl=5000, timeout=20000)
         return job
-        return
 
     def _trigger_schedule(self, repo, branch):
-        vcs_obj = VCSFactory().get_cvn(repo=repo)
-        last_commit = vcs_obj.get_last_commit(branch=branch)
-        committer = vcs_obj.get_committer(commit=last_commit)
+        triggered_by = "ZeroCI Scheduler"
+        self.vcs._set_repo_obj(repo=repo)
+        last_commit = self.vcs.get_last_commit(branch=branch)
+        committer = self.vcs.get_committer(commit=last_commit)
         where = {"repo": repo, "branch": branch, "commit": last_commit, "status": PENDING}
+        run, run_id = self._prepare_run_object(repo=repo, branch=branch, commit=last_commit, committer=committer, triggered_by=triggered_by)
         exist_run = Run.get_objects(fields=["status"], **where)
-        run, run_id = self._prepare_run_object(repo=repo, branch=branch, commit=last_commit, committer=committer)
         if exist_run:
             msg = f"There is a running job from this commit {last_commit}"
             return self._report(msg, run, run_id)
-        run.triggered_by = "ZeroCI Scheduler"
-        run.save()
-        status, config, msg = self._load_config(repo, last_commit)
-        if not status:
-            return self._report(msg, run, run_id)
-        validator = Validator()
-        valid, msg = validator.validate_yaml(config)
-        if not valid:
-            return self._report(msg, run, run_id)
-        runner.build_and_test(run_id, config)
+        config = self._load_validate_config(run_id=run_id, triggered_by=triggered_by)
+        if config:
+            self.runner.build_and_test(run_id, config)
 
     def _report(self, msg, run, run_id):
         msg = f"{msg} (see examples: https://github.com/threefoldtech/zeroCI/tree/development/docs/config)"
-        redis.rpush(run_id, msg)
+        self.redis.rpush(run_id, msg)
         run.result.append({"type": LOG_TYPE, "status": ERROR, "name": "Yaml File", "content": msg})
         run.status = ERROR
         run.save()
-        reporter.report(run_id=run_id, run_obj=run)
-        return
+        self.reporter.report(run_id=run_id, run_obj=run)
diff --git a/backend/actions/validator.py b/backend/actions/validator.py
index 86c5036d..fa9a153b 100644
--- a/backend/actions/validator.py
+++ b/backend/actions/validator.py
@@ -4,229 +4,228 @@
 from croniter import croniter
 
 
-class Validator:
-    def _validate_test_script(self, test_script):
-        msg = ""
-        if not test_script:
-            msg = "script should be in job file and shouldn't be empty"
-        else:
-            if not isinstance(test_script, list):
-                msg = "script should be list"
-            else:
-                for item in test_script:
-                    if not isinstance(item, dict):
-                        msg = "Every element in script should be dict"
-                    else:
-                        name = item.get("name")
-                        if not name:
-                            msg = "Every element in script should conttain a name"
-                        else:
-                            if not isinstance(name, str):
-                                msg = "Every name in script should be str"
-                            cmd = item.get("cmd")
-                            if not cmd:
-                                type = item.get("type")
-                                if not type:
-                                    msg = "Every element in script should conttain a cmd or type"
-                                else:
-                                    if type == "neph":
-                                        working_dir = item.get("working_dir")
-                                        if not working_dir:
-                                            msg = "working_dir should be added for neph type"
-                                        yaml_path = item.get("yaml_path")
-                                        if not yaml_path:
-                                            msg = "yaml_path should be added for neph type"
-                                    else:
-                                        msg = f"{type} is not supported"
-                            else:
-                                if not isinstance(cmd, str):
-                                    msg = "Every cmd in script should be str"
-        return msg
+def _validate_test_script(test_script):
+    msg = ""
+    if not test_script:
+        msg = "script should be in job file and shouldn't be empty"
+    else:
+        if not isinstance(test_script, list):
+            msg = "script should be list"
+        else:
+            for item in test_script:
+                if not isinstance(item, dict):
+                    msg = "Every element in script should be dict"
+                else:
+                    name = item.get("name")
+                    if not name:
+                        msg = "Every element in script should contain a name"
+                    else:
+                        if not isinstance(name, str):
+                            msg = "Every name in script should be str"
+                        cmd = item.get("cmd")
+                        if not cmd:
+                            type = item.get("type")
+                            if not type:
+                                msg = "Every element in script should contain a cmd or type"
+                            else:
+                                if type == "neph":
+                                    working_dir = item.get("working_dir")
+                                    if not working_dir:
+                                        msg = "working_dir should be added for neph type"
+                                    yaml_path = item.get("yaml_path")
+                                    if not yaml_path:
+                                        msg = "yaml_path should be added for neph type"
+                                else:
+                                    msg = f"{type} is not supported"
+                        else:
+                            if not isinstance(cmd, str):
+                                msg = "Every cmd in script should be str"
+    return msg
 
-    def _validate_install_script(self, install_script):
-        msg = ""
-        if not install_script:
-            msg = "install should be in job file and shouldn't be empty"
-        else:
-            if not isinstance(install_script, str):
-                msg = "install should be str"
-
-        return msg
+def _validate_install_script(install_script):
+    msg = ""
+    if not install_script:
+        msg = "install should be in job file and shouldn't be empty"
+    else:
+        if not isinstance(install_script, str):
+            msg = "install should be str"
+
+    return msg
 
-    def _validate_prerequisites(self, prerequisites):
-        msg = ""
-        if not prerequisites:
-            msg = "prerequisites should be in job file and shouldn't be empty"
-        else:
-            if not isinstance(prerequisites, dict):
-                msg = "prerequisites should be dict"
-            else:
-                image_name = prerequisites.get("image_name")
-                if not image_name:
-                    msg = "prerequisites should contain image_name and shouldn't be empty"
-                else:
-                    if not isinstance(image_name, str):
-                        msg = "image_name should be str"
-                    else:
-                        if ":" in image_name:
-                            repository, tag = image_name.split(":")
-                        else:
-                            repository = image_name
-                            tag = "latest"
-                        response = requests.get(f"https://index.docker.io/v1/repositories/{repository}/tags/{tag}")
-                        if response.status_code is not requests.codes.ok:
-                            msg = "Invalid docker image's name "
-                        shell_bin = prerequisites.get("shell_bin")
-                        if shell_bin:
-                            if not isinstance(shell_bin, str):
-                                msg = "shell_bin should be str"
-        return msg
+def _validate_prerequisites(prerequisites):
+    msg = ""
+    if not prerequisites:
+        msg = "prerequisites should be in job file and shouldn't be empty"
+    else:
+        if not isinstance(prerequisites, dict):
+            msg = "prerequisites should be dict"
+        else:
+            image_name = prerequisites.get("image_name")
+            if not image_name:
+                msg = "prerequisites should contain image_name and shouldn't be empty"
+            else:
+                if not isinstance(image_name, str):
+                    msg = "image_name should be str"
+                else:
+                    if ":" in image_name:
+                        repository, tag = image_name.split(":")
+                    else:
+                        repository = image_name
+                        tag = "latest"
+                    response = requests.get(f"https://index.docker.io/v1/repositories/{repository}/tags/{tag}")
+                    if response.status_code != requests.codes.ok:
+                        msg = "Invalid docker image name"
+                    shell_bin = prerequisites.get("shell_bin")
+                    if shell_bin:
+                        if not isinstance(shell_bin, str):
+                            msg = "shell_bin should be str"
+    return msg
 
-    def _validate_bin_path(self, bin_path):
-        msg = ""
-        if bin_path:
-            if not isinstance(bin_path, str):
-                msg = "bin_path should be str"
-
-        return msg
+def _validate_bin_path(bin_path):
+    msg = ""
+    if bin_path:
+        if not isinstance(bin_path, str):
+            msg = "bin_path should be str"
+
+    return msg
 
-    def _validate_job_name(self, name):
-        msg = ""
-        if not name:
-            msg = "name should be in job file and shouldn't be empty"
-        else:
-            if not isinstance(name, str):
-                msg = "name of the job should be str"
-
-        return msg
+def _validate_job_name(name):
+    msg = ""
+    if not name:
+        msg = "name should be in job file and shouldn't be empty"
+    else:
+        if not isinstance(name, str):
+            msg = "name of the job should be str"
+
+    return msg
 
-    def _validate_job(self, job):
-        job_name = job.get("name")
-        msg = self._validate_job_name(job_name)
-        if msg:
-            return msg
-
-        bin_path = job.get("bin_path")
-        msg = self._validate_bin_path(bin_path)
-        if msg:
-            return msg
-
-        test_script = job.get("script")
-        msg = self._validate_test_script(test_script)
-        if msg:
-            return msg
-
-        install_script = job.get("install")
-        msg = self._validate_install_script(install_script)
-        if msg:
-            return msg
-
-        prerequisites = job.get("prerequisites")
-        msg = self._validate_prerequisites(prerequisites)
-        return msg
+def _validate_job(job):
+    job_name = job.get("name")
+    msg = _validate_job_name(job_name)
+    if msg:
+        return msg
+
+    bin_path = job.get("bin_path")
+    msg = _validate_bin_path(bin_path)
+    if msg:
+        return msg
+
+    test_script = job.get("script")
+    msg = _validate_test_script(test_script)
+    if msg:
+        return msg
+
+    install_script = job.get("install")
+    msg = _validate_install_script(install_script)
+    if msg:
+        return msg
+
+    prerequisites = job.get("prerequisites")
+    msg = _validate_prerequisites(prerequisites)
+    return msg
 
-    def _validate_run_on(self, run_on):
-        msg = ""
-        if not run_on:
-            msg = "run_on should be in yaml and shouldn't be empty"
-        else:
-            if not isinstance(run_on, dict):
-                msg = "run_on should have push or pull_request as keys"
-            else:
-                push = run_on.get("push")
-                pull_request = run_on.get("pull_request")
-                schedule = run_on.get("schedule")
-                manual = run_on.get("manual")
-                if not any([push, pull_request, schedule, manual]):
-                    msg = "run_on should have push, pull_request, schedule or manual as keys and at least one of them should be filled"
-                else:
-                    if push:
-                        if not isinstance(push, dict):
-                            msg = "push should have branches as a key"
-                        else:
-                            branches = push.get("branches")
-                            if not branches:
-                                msg = "branches on push shouldn't be empty"
-                            else:
-                                if not isinstance(branches, list):
-                                    msg = "branches should be of contain list of the branches"
-                                else:
-                                    for branch in branches:
-                                        if not isinstance(branch, str):
-                                            msg = "branches should be list of str"
-                    if pull_request:
-                        if not isinstance(pull_request, dict):
-                            msg = "pull_request should have branches as a key"
-                        else:
-                            branches = pull_request.get("branches")
-                            if not branches:
-                                msg = "branches on pull_request shouldn't be empty"
-                            else:
-                                if not isinstance(branches, list):
-                                    msg = "branches should be of contain list of the branches"
-                                else:
-                                    for branch in branches:
-                                        if not isinstance(branch, str):
-                                            msg = "branches should be list of str"
-
-                    if manual:
-                        if not isinstance(manual, dict):
-                            msg = "manual should have branches as a key"
-                        else:
-                            branches = manual.get("branches")
-                            if not branches:
-                                msg = "branches on manual shouldn't be empty"
-                            else:
-                                if not isinstance(branches, list):
-                                    msg = "branches should be of contain list of the branches"
-                                else:
-                                    for branch in branches:
-                                        if not isinstance(branch, str):
-                                            msg = "branches should be list of str"
-
-                    if schedule:
-                        if not isinstance(schedule, dict):
-                            msg = "schedule should have branch and cron as keys"
-                        else:
-                            branch = schedule.get("branch")
-                            cron = schedule.get("cron")
-                            if not branch:
-                                msg = "branch on schedule shouldn't be empty"
-                            else:
-                                if not isinstance(branch, str):
-                                    msg = "branch should be str"
-                            if not cron:
-                                msg = "cron on schedule shouldn't be empty"
-                            else:
-                                if not isinstance(cron, str):
-                                    msg = "cron should be str"
-                                else:
-                                    try:
-                                        croniter(cron)
-                                    except Exception as e:
-                                        msg = traceback.format_exc(e)
-
-        return msg
+def _validate_run_on(run_on):
+    msg = ""
+    if not run_on:
+        msg = "run_on should be in yaml and shouldn't be empty"
+    else:
+        if not isinstance(run_on, dict):
+            msg = "run_on should have push, pull_request, schedule or manual as keys"
+        else:
+            push = run_on.get("push")
+            pull_request = run_on.get("pull_request")
+            schedule = run_on.get("schedule")
+            manual = run_on.get("manual")
+            if not any([push, pull_request, schedule, manual]):
+                msg = "run_on should have push, pull_request, schedule or manual as keys and at least one of them should be filled"
+            else:
+                if push:
+                    if not isinstance(push, dict):
+                        msg = "push should have branches as a key"
+                    else:
+                        branches = push.get("branches")
+                        if not branches:
+                            msg = "branches on push shouldn't be empty"
+                        else:
+                            if not isinstance(branches, list):
+                                msg = "branches should be a list of the branches"
+                            else:
+                                for branch in branches:
+                                    if not isinstance(branch, str):
+                                        msg = "branches should be a list of str"
+                if pull_request:
+                    if not isinstance(pull_request, dict):
+                        msg = "pull_request should have branches as a key"
+                    else:
+                        branches = pull_request.get("branches")
+                        if not branches:
+                            msg = "branches on pull_request shouldn't be empty"
+                        else:
+                            if not isinstance(branches, list):
+                                msg = "branches should be a list of the branches"
+                            else:
+                                for branch in branches:
+                                    if not isinstance(branch, str):
+                                        msg = "branches should be a list of str"
+
+                if manual:
+                    if not isinstance(manual, dict):
+                        msg = "manual should have branches as a key"
+                    else:
+                        branches = manual.get("branches")
+                        if not branches:
+                            msg = "branches on manual shouldn't be empty"
+                        else:
+                            if not isinstance(branches, list):
+                                msg = "branches should be a list of the branches"
+                            else:
+                                for branch in branches:
+                                    if not isinstance(branch, str):
+                                        msg = "branches should be a list of str"
+
+                if schedule:
+                    if not isinstance(schedule, dict):
+                        msg = "schedule should have branch and cron as keys"
+                    else:
+                        branch = schedule.get("branch")
+                        cron = schedule.get("cron")
+                        if not branch:
+                            msg = "branch on schedule shouldn't be empty"
+                        else:
+                            if not isinstance(branch, str):
+                                msg = "branch should be str"
+                        if not cron:
+                            msg = "cron on schedule shouldn't be empty"
+                        else:
+                            if not isinstance(cron, str):
+                                msg = "cron should be str"
+                            else:
+                                try:
+                                    croniter(cron)
+                                except Exception:
+                                    msg = traceback.format_exc()
+
+    return msg
 
-    def validate_yaml(self, config):
-        jobs = config.get("jobs")
-        if not jobs:
-            msg = "jobs should be in yaml and shouldn't be empty"
-        else:
-            if not isinstance(jobs, list):
-                msg = "jobs should be list"
-            else:
-                if len(jobs) > 3:
-                    msg = "jobs shouldn't be more than 3"
-                else:
-                    for job in jobs:
-                        msg = self._validate_job(job)
-                        if msg:
-                            break
-
-        run_on = config.get("run_on")
-        msg = self._validate_run_on(run_on)
-
-        if msg:
-            return False, msg
-        return True, ""
+def validate_yaml(config):
+    jobs = config.get("jobs")
+    if not jobs:
+        msg = "jobs should be in yaml and shouldn't be empty"
+    else:
+        if not isinstance(jobs, list):
+            msg = "jobs should be list"
+        else:
+            if len(jobs) > 3:
+                msg = "jobs shouldn't be more than 3"
+            else:
+                for job in jobs:
+                    msg = _validate_job(job)
+                    if msg:
+                        break
+
+    run_on = config.get("run_on")
+    msg = _validate_run_on(run_on)
+
+    if msg:
+        return False, msg
+    return True, ""
diff --git a/backend/apis/login.py b/backend/apis/login.py
index 4a09d869..608327c2 100644
--- a/backend/apis/login.py
+++ b/backend/apis/login.py
@@ -14,9 +14,6 @@ CALLBACK_URL = 
"/auth/3bot_callback" REDIRECT_URL = "https://login.threefold.me" - - -utils = Utils() PRIV_KEY = nacl.signing.SigningKey.generate() @@ -28,7 +25,7 @@ def login(): public_key = PRIV_KEY.verify_key if provider and provider == "3bot": - state = utils.random_string() + state = Utils.random_string() session["next_url"] = next_url session["state"] = state app_id = request.get_header("host") diff --git a/backend/apis/trigger.py b/backend/apis/trigger.py index 93252010..274bc916 100644 --- a/backend/apis/trigger.py +++ b/backend/apis/trigger.py @@ -12,7 +12,7 @@ from apis.base import app, check_configs, user trigger = Trigger() -q = Queue(connection=Redis(), name="zeroci") +queue = Queue(connection=Redis(), name="zeroci") @app.route("/git_trigger", method=["POST"]) @@ -35,7 +35,7 @@ def git_trigger(): committer = request.json["pusher"]["login"] branch_exist = not commit.startswith("000000") if branch_exist: - job = q.enqueue_call( + job = queue.enqueue_call( trigger.enqueue, args=(repo, branch, commit, committer, "", None, False), result_ttl=5000, @@ -51,7 +51,7 @@ def git_trigger(): target_branch = request.json["pull_request"]["base"]["ref"] commit = request.json["pull_request"]["head"]["sha"] committer = request.json["sender"]["login"] - job = q.enqueue_call( + job = queue.enqueue_call( trigger.enqueue, args=(repo, branch, commit, committer, target_branch, None, False), result_ttl=5000, @@ -78,7 +78,7 @@ def run_trigger(): return HTTPResponse( f"There is a running job for this run_id {run_id}, please try again after this run finishes", 503 ) - job = q.enqueue_call( + job = queue.enqueue_call( trigger.enqueue, args=("", "", "", "", "", run_id, True), result_ttl=5000, timeout=20000 ) if job: @@ -96,7 +96,7 @@ def run_trigger(): f"There is a running job from this commit {last_commit}, please try again after this run finishes", 503 ) if last_commit: - job = q.enqueue_call( + job = queue.enqueue_call( trigger.enqueue, args=(repo, branch, last_commit, committer, "", None, True), result_ttl=5000, diff --git a/backend/deployment/container.py b/backend/deployment/container.py index 691d6542..bed8a12e 100644 --- a/backend/deployment/container.py +++ b/backend/deployment/container.py @@ -3,10 +3,10 @@ import time import paramiko -import redis import yaml from kubernetes import client, config from kubernetes.stream import stream +from redis import Redis from utils.constants import BIN_DIR from utils.utils import Utils @@ -24,11 +24,30 @@ def __init__(self, rc, out): class Container(Utils): - def __init__(self): + def __init__(self, name=None, test_container_name=None, helper_container_name=None, namespace=None): super().__init__() + config.load_incluster_config() self.shell_bin = "/bin/sh" + self.name = name + self.test_container_name = test_container_name + self.helper_container_name = helper_container_name + self.namespace = namespace + self._redis = None + self._client = None + + @property + def redis(self): + if not self._redis: + self._redis = Redis() + return self._redis + + @property + def client(self): + if not self._client: + self._client = client.CoreV1Api() + return self._client - def ssh_command(self, cmd, ip=None, port=22): + def execute_on_helper(self, cmd): """Execute a command on a remote machine using ssh. :param cmd: command to be executed on a remote machine. 
:type cmd: str @@ -38,13 +57,11 @@ def ssh_command(self, cmd, ip=None, port=22): :type port: int :return: Execution object containing (returncode, stdout) """ - if not ip: - ip = self.name out = "" client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) try: - client.connect(hostname=ip, port=port, timeout=30) + client.connect(hostname=self.name, port=22, timeout=30) except: out = "Couldn't ssh on the helper container, maybe the test broke the ssh or the helper container become unreachable" rc = 1 @@ -62,12 +79,10 @@ def ssh_command(self, cmd, ip=None, port=22): return Complete_Execution(rc, out) - def ssh_get_remote_file(self, remote_path, local_path, ip=None, port=22): - if not ip: - ip = self.name + def get_remote_file_from_helper(self, remote_path, local_path): client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) - client.connect(hostname=ip, port=port, timeout=30) + client.connect(hostname=self.name, port=22, timeout=30) ftp = client.open_sftp() try: ftp.get(remote_path, local_path) @@ -76,12 +91,10 @@ def ssh_get_remote_file(self, remote_path, local_path, ip=None, port=22): except: return False - def ssh_set_remote_file(self, remote_path, local_path, ip=None, port=22): - if not ip: - ip = self.name + def set_remote_file_on_helper(self, remote_path, local_path): client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy()) - client.connect(hostname=ip, port=port, timeout=30) + client.connect(hostname=self.name, port=22, timeout=30) ftp = client.open_sftp() try: ftp.put(local_path, remote_path) @@ -90,12 +103,11 @@ def ssh_set_remote_file(self, remote_path, local_path, ip=None, port=22): except: return False - def redis_push(self, run_id, content, verbose=True): + def _redis_push(self, run_id, content, verbose=True): if verbose: - r = redis.Redis() - r.rpush(run_id, content) + self.redis.rpush(run_id, content) - def execute_command(self, cmd, run_id, verbose=True): + def execute_on_test_container(self, cmd, run_id, verbose=True): """Execute a command on a remote machine using ssh. :param cmd: command to be executed on a remote machine. 
@@ -123,7 +135,7 @@ def execute_command(self, cmd, run_id, verbose=True): ) except: out += "Couldn't run on the testing container, container become unreachable" - self.redis_push(run_id, out, verbose=verbose) + self._redis_push(run_id, out, verbose=verbose) rc = 137 return Complete_Execution(rc, out) @@ -133,18 +145,18 @@ def execute_command(self, cmd, run_id, verbose=True): content = response.read_stdout(timeout=600) except: msg = "\nConnectionError: Couldn't execute cmd on the runner" - self.redis_push(run_id, msg, verbose=verbose) + self._redis_push(run_id, msg, verbose=verbose) out += msg rc = 124 break end = time.time() time_taken = end - start if content: - self.redis_push(run_id, content, verbose=verbose) + self._redis_push(run_id, content, verbose=verbose) out += content elif time_taken > 590: msg = "\nTimeout exceeded 10 mins with no output" - self.redis_push(run_id, msg, verbose=verbose) + self._redis_push(run_id, msg, verbose=verbose) out += msg rc = 124 response.close() @@ -154,19 +166,19 @@ def execute_command(self, cmd, run_id, verbose=True): rc = response.returncode if rc == 137: msg = "Runner expired (job takes more than 1 hour)" - self.redis_push(run_id, msg, verbose=verbose) + self._redis_push(run_id, msg, verbose=verbose) out += msg return Complete_Execution(rc, out) - def get_remote_file(self, remote_path, local_path): - response = self.execute_command(f"cat {remote_path}", run_id="", verbose=False) + def get_remote_file_from_test_container(self, remote_path, local_path): + response = self.execute_on_test_container(f"cat {remote_path}", run_id="", verbose=False) if not response.returncode: self.write_file(text=response.stdout, file_path=local_path) return True return False - def create_pod(self, env, prerequisites, repo_path): + def _create_pod(self, env, prerequisites, repo_path): # zeroci vol bin_mount_path = BIN_DIR bin_vol_name = "bin-path" @@ -229,7 +241,7 @@ def create_pod(self, env, prerequisites, repo_path): pod = client.V1Pod(api_version="v1", kind="Pod", metadata=meta, spec=spec) self.client.create_namespaced_pod(body=pod, namespace=self.namespace) - def create_service(self): + def _create_service(self): port = client.V1ServicePort(name="ssh", port=22) spec = client.V1ServiceSpec(ports=[port], selector={"app": self.name}) meta = client.V1ObjectMeta(name=self.name, namespace=self.namespace, labels={"app": self.name}) @@ -243,8 +255,6 @@ def deploy(self, env, prerequisites, repo_path): :type prerequisites: list :return: bool (True: if virtual machine is created). 
""" - config.load_incluster_config() - self.client = client.CoreV1Api() self.name = self.random_string() self.test_container_name = f"test-{self.name}" self.helper_container_name = f"helper-{self.name}" @@ -253,9 +263,9 @@ def deploy(self, env, prerequisites, repo_path): self.shell_bin = prerequisites["shell_bin"] for _ in range(RETRIES): try: - self.create_pod(env=env, prerequisites=prerequisites, repo_path=repo_path) - self.create_service() - self.wait_for_container() + self._create_pod(env=env, prerequisites=prerequisites, repo_path=repo_path) + self._create_service() + self._wait_for_container() break except: self.delete() @@ -263,7 +273,7 @@ def deploy(self, env, prerequisites, repo_path): return False return True - def wait_for_container(self): + def _wait_for_container(self): for _ in range(TIMEOUT): time.sleep(1) container_status = self.client.read_namespaced_pod_status(namespace=self.namespace, name=self.name) @@ -291,14 +301,14 @@ def run_test(self, run_cmd, run_id): :type env: dict :return: path to xml file if exist and subprocess object containing (returncode, stdout, stderr) """ - response = self.execute_command(run_cmd, run_id=run_id) + response = self.execute_on_test_container(run_cmd, run_id=run_id) file_path = "/zeroci/xml/{}.xml".format(self.random_string()) remote_path = "/test.xml" - copied = self.get_remote_file(remote_path=remote_path, local_path=file_path) + copied = self.get_remote_file_from_test_container(remote_path=remote_path, local_path=file_path) if copied: file_path = file_path delete_cmd = f"rm -f {remote_path}" - self.execute_command(delete_cmd, run_id=run_id) + self.execute_on_test_container(delete_cmd, run_id=run_id) else: if os.path.exists(file_path): os.remove(file_path) diff --git a/backend/packages/vcs/vcs.py b/backend/packages/vcs/vcs.py index d9e54cd1..93f4447d 100644 --- a/backend/packages/vcs/vcs.py +++ b/backend/packages/vcs/vcs.py @@ -152,10 +152,13 @@ def __init__(self, repo=None): configs = InitialConfig() self.HOOK_URL = urljoin(configs.domain, "git_trigger") if configs.vcs_token: - self.repo = repo self.github_cl = GH(configs.vcs_token) if repo: - self.repo_obj = self.github_cl.get_repo(self.repo) + self._set_repo_obj(repo) + + def _set_repo_obj(self, repo): + self.repo = repo + self.repo_obj = self.github_cl.get_repo(self.repo) @VCSInterface.call_trial def status_send( @@ -264,10 +267,14 @@ def _get_gitea_cl(): self.repo_obj = giteapy.RepositoryApi(_get_gitea_cl()) self.user_obj = giteapy.UserApi(_get_gitea_cl()) self.org_obj = giteapy.OrganizationApi(_get_gitea_cl()) + if repo: - self.repo = repo - self.owner = repo.split("/")[0] # org name - self.repo_name = self.repo.split("/")[-1] + self._set_repo_obj(repo) + + def _set_repo_obj(self, repo): + self.repo = repo + self.owner = repo.split("/")[0] # org name + self.repo_name = self.repo.split("/")[-1] @VCSInterface.call_trial def status_send( diff --git a/backend/utils/utils.py b/backend/utils/utils.py index ed03c817..e93cde8a 100644 --- a/backend/utils/utils.py +++ b/backend/utils/utils.py @@ -10,6 +10,10 @@ class Utils: + @staticmethod + def random_string(self): + return "s" + uuid4().hex + def execute_cmd(self, cmd, timeout=3600): with Popen(cmd, shell=True, universal_newlines=True, stdout=PIPE, stderr=PIPE, encoding="utf-8") as process: try: @@ -23,9 +27,6 @@ def execute_cmd(self, cmd, timeout=3600): return CompletedProcess(process.args, returncode=retruncode, stdout=stdout, stderr=stderr) - def random_string(self): - return "s" + str(uuid4())[:10].replace("-", "") - def 
write_file(self, text, file_path, append=False, binary=False): """Write result file. From 0379b99140524249e0aa1f051e400990e2a334b4 Mon Sep 17 00:00:00 2001 From: ahmedhanafy725 Date: Wed, 23 Dec 2020 16:43:38 +0200 Subject: [PATCH 12/13] Formatting --- backend/actions/runner.py | 7 +++--- backend/actions/trigger.py | 40 +++++++++++++++++++++++++-------- backend/actions/validator.py | 7 ++++++ backend/deployment/container.py | 2 +- 4 files changed, 43 insertions(+), 13 deletions(-) diff --git a/backend/actions/runner.py b/backend/actions/runner.py index 7cf9ed0b..ae2d163e 100644 --- a/backend/actions/runner.py +++ b/backend/actions/runner.py @@ -46,13 +46,12 @@ def reporter(self): if not self._reporter: self._reporter = Reporter() return self._reporter - + @property def utils(self): if not self._utils: self._utils = Utils() return self._utils - def _test_run(self, job): """Runs tests and store the result in DB.""" @@ -123,7 +122,9 @@ def _neph_run(self, job_name, line): def _build(self, job, clone_details, job_number): """Create VM with the required prerequisties and run installation steps to get it ready for running tests.""" env = self._get_run_env() - deployed = self.container.deploy(env=env, prerequisites=job["prerequisites"], repo_path=clone_details["remote_path"]) + deployed = self.container.deploy( + env=env, prerequisites=job["prerequisites"], repo_path=clone_details["remote_path"] + ) installed = False if deployed: if job_number != 0: diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py index 91b3b8c6..fef87a6e 100644 --- a/backend/actions/trigger.py +++ b/backend/actions/trigger.py @@ -45,13 +45,13 @@ def reporter(self): if not self._reporter: self._reporter = Reporter() return self._reporter - + @property def queue(self): if not self._queue: self._queue = Queue(connection=self.redis, name="default") return self._queue - + @property def scheduler(self): if not self._scheduler: @@ -78,7 +78,9 @@ def _load_config(self, repo, commit): return False, "", msg - def _load_validate_config(self, repo="", branch="", commit="", committer="", run_id=None, triggered=False, triggered_by=None): + def _load_validate_config( + self, repo="", branch="", commit="", committer="", run_id=None, triggered=False, triggered_by=None + ): if run_id: run = Run.get(run_id=run_id) repo = run.repo @@ -86,20 +88,34 @@ def _load_validate_config(self, repo="", branch="", commit="", committer="", run status, config, msg = self._load_config(repo, commit) if not status: run, run_id = self._prepare_run_object( - repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered, triggered_by=triggered_by + repo=repo, + branch=branch, + commit=commit, + committer=committer, + run_id=run_id, + triggered=triggered, + triggered_by=triggered_by, ) self._report(msg, run, run_id) return False valid, msg = validate_yaml(config) if not valid: run, run_id = self._prepare_run_object( - repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered, triggered_by=triggered_by + repo=repo, + branch=branch, + commit=commit, + committer=committer, + run_id=run_id, + triggered=triggered, + triggered_by=triggered_by, ) self._report(msg, run, run_id) return False return config - def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_id=None, triggered=False, triggered_by=None): + def _prepare_run_object( + self, repo="", branch="", commit="", committer="", run_id=None, triggered=False, triggered_by=None + ): configs = InitialConfig() 
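# The triggered_by parameter threaded through these calls falls back via
# or-chaining: an explicit value such as "ZeroCI Scheduler" wins over the
# session username or the "VCS Hook" default. A small illustration of the
# idiom, with pick() as a hypothetical stand-in:
#
#     def pick(triggered_by=None):
#         return triggered_by or "VCS Hook"
#
#     pick()                    # "VCS Hook"
#     pick("ZeroCI Scheduler")  # "ZeroCI Scheduler"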
         timestamp = int(datetime.now().timestamp())
 
         if run_id:
@@ -157,7 +173,9 @@ def _prepare_run_object(self, repo="", branch="", commit="", committer="", run_i
         return None, None
 
     def enqueue(self, repo="", branch="", commit="", committer="", target_branch="", run_id=None, triggered=False):
-        config = self._load_validate_config(repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered)
+        config = self._load_validate_config(
+            repo=repo, branch=branch, commit=commit, committer=committer, run_id=run_id, triggered=triggered
+        )
         if not config:
             return
 
@@ -214,7 +232,9 @@ def _trigger(self, repo_config, run, run_id):
         self.vcs._set_repo_obj(repo=run.repo)
         self.vcs.status_send(status=PENDING, link=link, commit=run.commit)
         # TODO: before triggering, check that there is not a run with same commit and in state pending.
-        job = self.queue.enqueue_call(func=self.runner.build_and_test, args=(run_id, repo_config), result_ttl=5000, timeout=20000)
+        job = self.queue.enqueue_call(
+            func=self.runner.build_and_test, args=(run_id, repo_config), result_ttl=5000, timeout=20000
+        )
         return job
 
     def _trigger_schedule(self, repo, branch):
@@ -223,7 +243,9 @@ def _trigger_schedule(self, repo, branch):
         last_commit = self.vcs.get_last_commit(branch=branch)
         committer = self.vcs.get_committer(commit=last_commit)
         where = {"repo": repo, "branch": branch, "commit": last_commit, "status": PENDING}
-        run, run_id = self._prepare_run_object(repo=repo, branch=branch, commit=last_commit, committer=committer, triggered_by=triggered_by)
+        run, run_id = self._prepare_run_object(
+            repo=repo, branch=branch, commit=last_commit, committer=committer, triggered_by=triggered_by
+        )
         exist_run = Run.get_objects(fields=["status"], **where)
         if exist_run:
             msg = f"There is a running job from this commit {last_commit}"
diff --git a/backend/actions/validator.py b/backend/actions/validator.py
index fa9a153b..14694332 100644
--- a/backend/actions/validator.py
+++ b/backend/actions/validator.py
@@ -42,6 +42,7 @@ def _validate_test_script(test_script):
             msg = "Every cmd in script should be str"
     return msg
 
+
 def _validate_install_script(install_script):
     msg = ""
     if not install_script:
@@ -52,6 +53,7 @@ def _validate_install_script(install_script):
 
     return msg
 
+
 def _validate_prerequisites(prerequisites):
     msg = ""
     if not prerequisites:
@@ -81,6 +83,7 @@ def _validate_prerequisites(prerequisites):
             msg = "shell_bin should be str"
     return msg
 
+
 def _validate_bin_path(bin_path):
     msg = ""
     if bin_path:
@@ -89,6 +92,7 @@ def _validate_bin_path(bin_path):
 
     return msg
 
+
 def _validate_job_name(name):
     msg = ""
     if not name:
@@ -99,6 +103,7 @@ def _validate_job_name(name):
 
     return msg
 
+
 def _validate_job(job):
     job_name = job.get("name")
     msg = _validate_job_name(job_name)
@@ -124,6 +129,7 @@ def _validate_job(job):
         msg = _validate_prerequisites(prerequisites)
     return msg
 
+
 def _validate_run_on(run_on):
     msg = ""
     if not run_on:
@@ -207,6 +213,7 @@ def _validate_run_on(run_on):
 
     return msg
 
+
 def validate_yaml(config):
     jobs = config.get("jobs")
     if not jobs:
diff --git a/backend/deployment/container.py b/backend/deployment/container.py
index bed8a12e..4a64eaa5 100644
--- a/backend/deployment/container.py
+++ b/backend/deployment/container.py
@@ -40,7 +40,7 @@ def redis(self):
         if not self._redis:
             self._redis = Redis()
         return self._redis
-    
+
     @property
     def client(self):
         if not self._client:

From f2979d2bc8bb109c3ff17e74c213fa3e6a45e353 Mon Sep 17 00:00:00 2001
From: ahmedhanafy725
Date: Wed, 23 Dec 2020 17:11:11 +0200
Subject: [PATCH 13/13] Remove useless imports

Fix run object name
---
 backend/actions/runner.py  | 1 -
 backend/actions/trigger.py | 1 -
 backend/models/run.py      | 4 +++-
 backend/utils/utils.py     | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/backend/actions/runner.py b/backend/actions/runner.py
index ae2d163e..9f08f68b 100644
--- a/backend/actions/runner.py
+++ b/backend/actions/runner.py
@@ -17,7 +17,6 @@
 from utils.utils import Utils
 
 from actions.reporter import Reporter
-from actions.validator import Validator
 
 
 class Runner:
diff --git a/backend/actions/trigger.py b/backend/actions/trigger.py
index fef87a6e..ad6777c5 100644
--- a/backend/actions/trigger.py
+++ b/backend/actions/trigger.py
@@ -7,7 +7,6 @@
 from bottle import request
 from models.initial_config import InitialConfig
 from models.run import Run
-from models.schedule_info import ScheduleInfo
 from packages.vcs.vcs import VCSFactory
 from redis import Redis
 from rq import Queue
diff --git a/backend/models/run.py b/backend/models/run.py
index 4b1bd1ad..194e59e0 100644
--- a/backend/models/run.py
+++ b/backend/models/run.py
@@ -1,5 +1,7 @@
 import uuid
 
+from utils.utils import Utils
+
 from .base import Document, ModelFactory, StoredFactory, fields
 
 
@@ -19,6 +21,6 @@ class Run(ModelFactory):
     _model = StoredFactory(RunModel)
 
     def __new__(cls, **kwargs):
-        name = uuid.uuid4().hex
+        name = Utils.random_string()
         kwargs["timestamp"] = int(kwargs["timestamp"])
         return cls._model.new(name=name, **kwargs)
diff --git a/backend/utils/utils.py b/backend/utils/utils.py
index e93cde8a..81dda37d 100644
--- a/backend/utils/utils.py
+++ b/backend/utils/utils.py
@@ -11,7 +11,7 @@
 
 class Utils:
     @staticmethod
-    def random_string(self):
+    def random_string():
         return "s" + uuid4().hex
 
     def execute_cmd(self, cmd, timeout=3600):
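
For context on the final utils.py hunk: `random_string` was decorated with `@staticmethod` but still declared a `self` parameter, so calling it on the class, as `Run.__new__` does above, would raise `TypeError: random_string() missing 1 required positional argument: 'self'`. A minimal sketch of the fixed behavior, with the class body reduced to this one method for illustration:

    from uuid import uuid4


    class Utils:
        @staticmethod
        def random_string():
            # The "s" prefix mirrors the patched implementation;
            # uuid4().hex supplies 32 random hex characters.
            return "s" + uuid4().hex


    # Callable on the class itself, no instance needed; this is the
    # call Run.__new__ makes after this patch:
    print(Utils.random_string())  # e.g. "s1a2b3..." (varies per call)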