Paska tooling building and compiling
This commit is contained in:
@@ -158,3 +158,7 @@ cython_debug/
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
|
||||
#Generated data by Paska tooling
|
||||
generated_data/
|
||||
english-left3words-distsim.tagger
|
||||
+13
-13
@@ -9,19 +9,19 @@ class ProcessTextFile():
|
||||
def __init__(self) -> None:
    # Stateless helper class: nothing to initialize.
    pass
|
||||
|
||||
# def load_csv(self, filename, col_value):
|
||||
# all_lines = []
|
||||
# with open(filename, mode='r') as csv_file:
|
||||
# csv_reader = csv.reader(csv_file, delimiter=';')
|
||||
# line_count = 0
|
||||
# for row in csv_reader:
|
||||
# if line_count == 0:
|
||||
# print(f'Column names are {", ".join(row)}')
|
||||
# line_count += 1
|
||||
# all_lines.append(row[0])
|
||||
# line_count += 1
|
||||
# print(f'Processed {line_count} lines.')
|
||||
# return all_lines
|
||||
def load_csv(self, filename, col_value):
    """Load one column from a ';'-delimited CSV file.

    Args:
        filename: path of the CSV file to read.
        col_value: zero-based index of the column to extract.

    Returns:
        List of the selected column's values, header row excluded.
    """
    all_lines = []
    with open(filename, mode='r') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=';')
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                # First row is the header: report it, do not collect it.
                print(f'Column names are {", ".join(row)}')
            else:
                # BUGFIX: original ignored col_value (always took row[0])
                # and also appended the header row's value to the data.
                all_lines.append(row[col_value])
            # BUGFIX: original incremented twice for the header row,
            # inflating the "Processed N lines." count.
            line_count += 1
        print(f'Processed {line_count} lines.')
    return all_lines
|
||||
|
||||
def load_xml_file(self, filename, col_value):
|
||||
|
||||
|
||||
@@ -2,20 +2,21 @@ name: smell-detector
|
||||
channels:
|
||||
- defaults
|
||||
dependencies:
|
||||
- ca-certificates=2023.01.10=hecd8cb5_0
|
||||
- libcxx=14.0.6=h9765a3e_0
|
||||
- libffi=3.4.2=hecd8cb5_6
|
||||
- ncurses=6.4=hcec6c5f_0
|
||||
- openssl=1.1.1t=hca72f7f_0
|
||||
- pip=23.0.1=py38hecd8cb5_0
|
||||
- python=3.8.16=h218abb5_3
|
||||
- readline=8.2=hca72f7f_0
|
||||
- setuptools=66.0.0=py38hecd8cb5_0
|
||||
- sqlite=3.41.2=h6c40b1e_0
|
||||
- tk=8.6.12=h5d9f67b_0
|
||||
- wheel=0.38.4=py38hecd8cb5_0
|
||||
- xz=5.2.10=h6c40b1e_1
|
||||
- zlib=1.2.13=h4dc903c_0
|
||||
- ca-certificates=2023.01.10
|
||||
- libcxx=14.0.6
|
||||
- libffi=3.4.2
|
||||
- ncurses=6.4
|
||||
- openssl=1.1.1t
|
||||
- pip=23.0.1
|
||||
- python=3.8.16
|
||||
- readline=8.2
|
||||
- setuptools=66.0.0
|
||||
- sqlite=3.41.2
|
||||
- tk=8.6.12
|
||||
- wheel=0.38.4
|
||||
- xz=5.2.10
|
||||
- zlib=1.2.13
|
||||
- spacy-model-en_core_web_sm=3.3.0
|
||||
- pip:
|
||||
- aiohttp==3.8.4
|
||||
- aiosignal==1.3.1
|
||||
@@ -39,7 +40,7 @@ dependencies:
|
||||
- datasets==2.10.1
|
||||
- dill==0.3.6
|
||||
- docker-pycreds==0.4.0
|
||||
- en-core-web-sm==3.3.0
|
||||
# - en-core-web-sm==3.3.0
|
||||
- exceptiongroup==1.1.1
|
||||
- fairscale==0.4.6
|
||||
- filelock==3.7.1
|
||||
|
||||
@@ -12,17 +12,30 @@ client = None
|
||||
|
||||
|
||||
|
||||
# write csv file
|
||||
# generate parsing trees
|
||||
#conda activate smell-detector
|
||||
# python /home/koebuntu/LLPTE/existing_research/paska/source/get-parsing-trees/get_cparsingtrees.py "/home/koebuntu/LLPTE/existing_research/paska/example/input_koen/" "/home/koebuntu/LLPTE/existing_research/paska/example/output_koen/"
|
||||
# check smell with paska
|
||||
#java -jar smell_detector.jar /home/koebuntu/LLPTE/existing_research/paska/example/output_koen/ /home/koebuntu/LLPTE/existing_research/paska/example/smells_koen/ /home/koebuntu/LLPTE/existing_research/paska/example/english-left3words-distsim.tagger
|
||||
|
||||
|
||||
#/home/koebuntu/LLPTE/existing_research/paska/example/smells_koen/
|
||||
|
||||
|
||||
|
||||
# Smoke-test the PASKA pipeline on two example requirements; results are
# written under the tool's generated_data/ output folders (see Paska_tool).
paska_tool = Paska_tool()
paska_tool.check_rimay_requirement("When an order cancellation message is received from the System-A then Reason must be displayed in the Sytem-B GUI field 'Reason of Cancellation'.")
paska_tool.check_rimay_requirement("When the System-A receives a rejection message from System-B, Then it must transform it to the corresponding XML message type and sent it to the System-C.")

#all gherkin data.
# NOTE(review): GherkinData is defined elsewhere in the project; assumed to
# return dicts with "scenario_name" and "content" keys — confirm against its
# definition.
csv_importer = GherkinData() #GherkinData
all_acceptance_criteria = csv_importer.load()
# csv_importer = GherkinData() #GherkinData
# all_acceptance_criteria = csv_importer.load()

# Dump every loaded scenario to stdout for manual inspection.
for scenario in all_acceptance_criteria:
    print(scenario["scenario_name"])
    print(scenario["content"])
    print("=================")
# for scenario in all_acceptance_criteria:
#     print(scenario["scenario_name"])
#     print(scenario["content"])
#     print("=================")

#setup_ui()
#ui.run()
|
||||
|
||||
+69
-4
@@ -1,10 +1,75 @@
|
||||
import shutil
|
||||
from subprocess import PIPE, Popen
|
||||
import csv
|
||||
import os
|
||||
import uuid
|
||||
|
||||
|
||||
class Paska_tool():
    """Wrapper around the PASKA requirement-smell-detection pipeline.

    Writes a single requirement to a CSV input file, runs the Python
    preprocessing step (parsing-tree generation) and then the Java smell
    detector on the preprocessed output.
    """

    def __init__(self) -> None:
        # NOTE(review): "worskpace"/"preproces" attribute-name typos are kept
        # on purpose so any external accesses do not break.
        # Removed a dead `pass` statement that preceded these assignments.
        self.worskpace_dir = "/home/koebuntu/LLPTE"
        self.base_dir = f"{self.worskpace_dir}/existing_research/paska"
        self.preproces_tool = f"{self.base_dir}/source/get-parsing-trees/get_cparsingtrees.py"
        self.java_tool = f"{self.base_dir}/smell_detector.jar"
        self.pos_tagger_file = f"{self.worskpace_dir}/english-left3words-distsim.tagger"

        self.output_folder = f"{self.worskpace_dir}/generated_data/"
        self.input_folder_req = f"{self.output_folder}/input_requirements/"
        self.output_folder_pre = f"{self.output_folder}/output_pre/"
        self.output_folder_smells = f"{self.output_folder}/output_smells/"

        self.__init_directories()

    def __init_directories(self):
        """Recreate a clean output directory tree for a fresh run."""
        # BUGFIX: ignore_errors=True so the very first run (when the output
        # folder does not exist yet) no longer crashes in rmtree.
        shutil.rmtree(self.output_folder, ignore_errors=True)
        # makedirs(exist_ok=True) also creates any missing parent directories.
        os.makedirs(self.output_folder, exist_ok=True)
        os.makedirs(self.input_folder_req, exist_ok=True)
        os.makedirs(self.output_folder_pre, exist_ok=True)
        os.makedirs(self.output_folder_smells, exist_ok=True)

    def write_input_file(self, id: str, requirement: str):
        """Write one requirement as the PASKA CSV input file.

        Expected input format — one ';'-separated record:
            Requirement ID (String); Requirement (String)
        e.g. "RQSVV.024";"If System-A has successfully performed all the
        validation rules, then System-A must set the state ... to 'Valid'."
        """
        data = [
            [id, requirement]
        ]
        filename = self.input_folder_req + "generated_rimay.csv"
        # QUOTE_ALL because requirement text routinely contains ';' itself.
        with open(filename, 'w', newline='') as csvfile:
            csvwriter = csv.writer(csvfile, delimiter=';', quoting=csv.QUOTE_ALL)
            csvwriter.writerows(data)

    def run_process(self, input_args: list):
        """Run a subprocess, wait for completion and echo stdout/stderr."""
        process = Popen(input_args, stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate()
        print("Result STDOUT: " + stdout.decode('utf-8'))
        print("Result STDERR: " + stderr.decode('utf-8'))

    def preprocess_paska(self):
        """PASKA step 1: generate parsing trees from the CSV input."""
        all_args = ["python", self.preproces_tool, self.input_folder_req, self.output_folder_pre]
        self.run_process(all_args)

    def start_paska_tool(self):
        """PASKA step 2: run the Java smell detector on the parsing trees."""
        all_args = ["java", "-jar", self.java_tool, self.output_folder_pre, self.output_folder_smells, self.pos_tagger_file]
        self.run_process(all_args)

    def check_rimay_requirement(self, requirement: str):
        """Full pipeline for one requirement: write input, preprocess, detect."""
        # BUGFIX: str() so write_input_file receives the declared `id: str`
        # instead of a uuid.UUID object (CSV cell content is unchanged).
        self.write_input_file(str(uuid.uuid4()), requirement)
        self.preprocess_paska()
        self.start_paska_tool()
|
||||
|
||||
|
||||
|
||||
|
||||
def start_paska_tool():
    # NOTE(review): leftover dead code — builds a command string that is
    # never used or executed; superseded by Paska_tool.start_paska_tool.
    # Candidate for deletion. (The diff hunk ends here, so any further body
    # is not visible in this view.)
    jar_file_path = "java -jar smell_detector.jar"
|
||||
|
||||
Reference in New Issue
Block a user