mirror of https://github.com/outbackdingo/OpCore-Simplify.git
synced 2026-01-27 18:19:49 +00:00
Use Python's built-in replacements to avoid errors when using virtual environments
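
The idea behind this change: replace requests, bs4, and python-dotenv with the standard library so the scripts run without pip-installing anything into a virtual environment. A minimal sketch of the stdlib fetch pattern, not code from the repository — the URL is hypothetical, and the certifi fallback mirrors create_ssl_context below:

    import json
    import os
    import ssl
    from urllib.request import urlopen, Request

    # Build an SSL context from the default OpenSSL CA bundle; fall back to
    # certifi's bundle only if that file is missing (certifi stays optional).
    cafile = ssl.get_default_verify_paths().openssl_cafile
    if not os.path.exists(cafile):
        import certifi
        cafile = certifi.where()
    context = ssl.create_default_context(cafile=cafile)

    # Stdlib equivalent of requests.get(url, headers=...).json()
    request = Request("https://api.github.com/rate_limit",  # hypothetical URL
                      headers={"Accept": "application/vnd.github+json"})
    with urlopen(request, context=context) as response:
        data = json.loads(response.read())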
@@ -303,7 +303,6 @@ REM Python found
 cls
 set "args=%*"
 set "args=!args:"=!"
-"!pypath!" -m pip install -r "!thisDir!\requirements.txt"
 if "!args!"=="" (
     "!pypath!" "!thisDir!!script_name!"
 ) else (
@@ -286,7 +286,6 @@ main() {
         return 1
     fi
     # Found it - start our script and pass all args
-    "$python" -m pip install -r "$dir/requirements.txt"
     "$python" "$dir/$target" "${args[@]}"
 }
 
@@ -1,19 +1,18 @@
 from Scripts.datasets import pci_data
 from Scripts import gpu_identifier
 from Scripts import utils
-from bs4 import BeautifulSoup
 
 class AIDA64:
     def __init__(self):
         self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"}
-        self.encodings = ['utf-8', 'latin-1', 'ISO-8859-1']
+        self.encodings = ["utf-8", "latin-1", "ISO-8859-1"]
         self.gpu_identifier = gpu_identifier.GPUIdentifier()
         self.utils = utils.Utils()
 
     def try_open(self, file_path):
         for encoding in self.encodings:
             try:
-                with open(file_path, 'r', encoding=encoding) as file:
+                with open(file_path, "r", encoding=encoding) as file:
                     return file.read()
             except UnicodeDecodeError:
                 continue
@@ -280,7 +279,7 @@ class AIDA64:
         if not device_description:
             device_id = device_props.get("Device ID", None)
             revision_id = device_props.get("Revision", None)[:-1] if device_props.get("Revision") else None
-            hardware_id = 'USB\\VID_{}&PID_{}&REV_{}'.format(device_id[:4], device_id[5:], revision_id[:-1]) if device_id and revision_id else None
+            hardware_id = "USB\\VID_{}&PID_{}&REV_{}".format(device_id[:4], device_id[5:], revision_id[:-1]) if device_id and revision_id else None
 
             if hardware_id:
                 device_description = self.utils.search_dict_iter(windows_devices, hardware_id + "&MI_00").get("Driver Description", None)
@@ -412,16 +411,16 @@ class AIDA64:
         parsed_dmi = {}
 
         for full_key, item_value in dmi_data.items():
-            occurrence_suffix = ''
+            occurrence_suffix = ""
             category_name = None
 
-            if '_#' in full_key:
-                suffix_idx = full_key.index('_#')
+            if "_#" in full_key:
+                suffix_idx = full_key.index("_#")
                 occurrence_suffix = full_key[suffix_idx:]
                 full_key = full_key[:suffix_idx]
 
-            if ' / ' in full_key:
-                category_idx = full_key.index(' / ')
+            if " / " in full_key:
+                category_idx = full_key.index(" / ")
                 category_name = full_key[:category_idx]
                 device_name = full_key[category_idx + 3:]
 
@@ -440,34 +439,35 @@ class AIDA64:
         for full_key, item_value in windows_devices.items():
             device_props = item_value.get("Device Properties", {})
 
             # Update device properties with hardware ID if available
             if "Hardware ID" in device_props:
                 device_props.update(self.parse_hardware_id(device_props.get("Hardware ID")))
 
             # Extract category name from the full key
             category_name = full_key.split(" / ")[0]
 
             # Initialize category dictionary if not already present
             if category_name not in parsed_windows_devices:
                 parsed_windows_devices[category_name] = {}
 
             # Extract device name from device properties
             device_name = device_props.get("Driver Description")
 
             # Add device to category dictionary
             parsed_windows_devices[category_name][self.get_unique_key(device_name, parsed_windows_devices[category_name])] = device_props
 
         return parsed_windows_devices
 
+    def get_inner_text(self, html_string):
+        text = ""
+        inside_tag = False
+        for char in html_string:
+            if char == "<":
+                inside_tag = True
+            elif char == ">":
+                inside_tag = False
+            elif not inside_tag:
+                text += char
+        return text.strip()
+
-    def html_to_dict(self, html_content):
-        soup = BeautifulSoup(html_content, "html.parser")
-        tables = soup.find_all('table')
-
-        if not tables:
-            return {}
-
-        root = {}
-        table_names = [
+    def parse_html_to_json(self, html_content):
+        parsed_data = {}
+        table_titles = [
             "Summary",
             "DMI",
             "CPU",
@@ -475,77 +475,62 @@ class AIDA64:
             "PCI Devices",
             "USB Devices"
         ]
-        table = None
-
-        for table_content in tables:
-            # Find the table header to identify the table
-            pt_element = table_content.find("td", class_="pt")
-            if pt_element:
-                table = pt_element.text.strip()
-            elif table in table_names:
-                root[table] = {}
-                stack = [(root[table], -1)]  # Stack holds (current_dict, current_level)
+        start_index = html_content.index("</div>") + len("</div>")
+
+        for title in table_titles:
+            title_marker = f">{title}<"
+            if title_marker not in html_content[start_index:]:
+                raise Exception("Your AIDA64 report is missing some information. Please revise it according to the provided guidelines")
+
+            title_index = html_content[start_index:].index(title_marker)
+            table_start_index = start_index + title_index + html_content[start_index+title_index:].index("\n")
+            table_end_index = table_start_index + 1 + html_content[table_start_index:].index("</TABLE>") + len("</TABLE>")
+            table_html = html_content[table_start_index:table_end_index].strip()
 
-                lines = str(table_content).strip().splitlines()
+            parsed_data[title] = {}
+            stack = [(parsed_data[title], -1)]
 
-                for line in lines:
-                    if line.startswith('<tr>'):
-                        # Remove <tr> tag
-                        line = line.replace('<tr>', '')
-
-                        # Calculate the current level based on the number of <td> tags
-                        level = (len(line) - len(line.lstrip('<td>'))) // 3 - 1
-                        if level < 1:
-                            continue
-
-                        # Remove all <td> tags from the left
-                        while line.startswith("<td>"):
-                            line = line[line.find(">") + 1:]
-
-                        if not line.startswith('<td') and '<td' in line:
-                            idx = line.index('<td')
-                            line = '{}{}{}{}'.format('<td>', line[:idx], '</td>', line[idx:])
-                        else:
+            for line in table_html.splitlines():
+                if line.startswith("<TR>"):
+                    line = line.replace("<TR>", "")
+
+                    level = (len(line) - len(line.lstrip("<TD>"))) // 3 - 1
+                    if level < 1:
+                        continue
+
-                        soup_line = BeautifulSoup(line, "html.parser")
-                        td_elements = soup_line.find_all('td')
-                        key = td_elements[0].text.strip()
-                        value = None if len(td_elements) < 2 else td_elements[-1].text.strip()
+                    while line.startswith("<TD>"):
+                        line = line[line.find(">") + 1:]
+                else:
+                    continue
 
-                        # Clean the key
-                        key = key.rstrip(":").strip("[]").strip()
+                line = line.replace("&nbsp;", "")
+                td_elements = line.split("<TD>")
+                key = self.get_inner_text(td_elements[0])
+                value = None if len(td_elements) < 2 else self.get_inner_text(td_elements[-1])
+
+                key = key.rstrip(":").strip("[]").strip()
 
-                        # Pop from stack to find the correct parent dictionary
-                        while stack and stack[-1][1] >= level:
-                            stack.pop()
+                while stack and stack[-1][1] >= level:
+                    stack.pop()
 
-                        current_dict = stack[-1][0]
-                        key = self.get_unique_key(key, current_dict)
-
-                        # Add the new key-value pair
-                        if value is None:
-                            new_dict = {}
-                            current_dict[key] = new_dict
-                            stack.append((new_dict, level))
-                        else:
-                            current_dict[key] = value
+                current_dict = stack[-1][0]
+                key = self.get_unique_key(key, current_dict)
 
+                if value is None:
+                    new_dict = {}
+                    current_dict[key] = new_dict
+                    stack.append((new_dict, level))
+                else:
+                    if '<td class="cc">' not in line:
+                        current_dict[key] = value
+                    else:
+                        if not current_dict.items():
+                            current_dict[key] = []
+                            current_dict[value] = []
+                        else:
+                            current_dict[list(current_dict.keys())[0]].append(key)
+                            current_dict[list(current_dict.keys())[1]].append(value)
+
+            start_index = table_end_index
 
-        if len(table_names) != len(root):
-            raise Exception("Your AIDA64 report is missing some information. Please revise it according to the provided guidelines")
-        return root
+        return parsed_data
 
     def dump(self, report_path):
         html_content = self.try_open(report_path)
-        report_dict = self.html_to_dict(html_content)
+        report_dict = self.parse_html_to_json(html_content)
 
         dmi = self.parse_dmi(report_dict.get("DMI", {}))
         windows_devices = self.parse_windows_devices(report_dict.get("Windows Devices", {}))
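
The new get_inner_text is a dependency-free stand-in for BeautifulSoup's .text: it simply drops everything between < and >. A standalone sketch of the same logic; the sample row string is an assumption for illustration:

    def get_inner_text(html_string):
        # Collect only the characters that sit outside <...> tags.
        text = ""
        inside_tag = False
        for char in html_string:
            if char == "<":
                inside_tag = True
            elif char == ">":
                inside_tag = False
            elif not inside_tag:
                text += char
        return text.strip()

    print(get_inner_text("<TD><B>North Bridge</B></TD>"))  # prints: North Bridge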
@@ -1,19 +1,14 @@
 from Scripts import resource_fetcher
 from Scripts import utils
-from dotenv import load_dotenv
-import os
-
-load_dotenv()
 
 class Github:
     def __init__(self):
         self.utils = utils.Utils()
-        # Load the GitHub token from environment variables
-        self.github_token = os.getenv("GITHUB_TOKEN")
         # Set the headers for GitHub API requests
         self.headers = {
             "Accept": "application/vnd.github+json",
-            "#Authorization": "token {}".format(self.github_token),
+            "#Authorization": "token GITHUB_TOKEN",
             "X-GitHub-Api-Version": "2022-11-28",
         }
         self.fetcher = resource_fetcher.ResourceFetcher(self.headers)
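
With python-dotenv gone, the "#Authorization" entry (inert because of the leading "#" in the key) no longer interpolates a token. If authenticated GitHub requests were needed again, the standard library alone would cover it — a hedged sketch, not part of this commit:

    import os

    headers = {
        "Accept": "application/vnd.github+json",
        "X-GitHub-Api-Version": "2022-11-28",
    }
    # Hypothetical: read the token straight from the environment, no .env file.
    token = os.environ.get("GITHUB_TOKEN")
    if token:
        headers["Authorization"] = "token {}".format(token)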
@@ -1,19 +1,26 @@
-import requests
+import sys
 import ssl
 import os
 import certifi
 import plistlib
+import json
+
+if sys.version_info >= (3, 0):
+    from urllib.request import urlopen, Request
+else:
+    import urllib2
+    from urllib2 import urlopen, Request
 
 class ResourceFetcher:
-    def __init__(self, headers = None):
+    def __init__(self, headers=None):
         self.request_headers = headers
-        self.buffer_size = 16*1024
+        self.buffer_size = 16 * 1024
         self.ssl_context = self.create_ssl_context()
 
     def create_ssl_context(self):
         try:
             cafile = ssl.get_default_verify_paths().openssl_cafile
             if not os.path.exists(cafile):
                 import certifi
                 cafile = certifi.where()
             ssl_context = ssl.create_default_context(cafile=cafile)
         except Exception as e:
@@ -22,35 +29,29 @@ class ResourceFetcher:
         return ssl_context
 
     def fetch_and_parse_content(self, resource_url, content_type=None):
-        response = requests.get(resource_url, headers=self.request_headers, verify=self.ssl_context.verify_mode != ssl.CERT_NONE)
-        response.raise_for_status()
+        request = Request(resource_url, headers=self.request_headers or {})
+        with urlopen(request, context=self.ssl_context) as response:
+            content = response.read()
+            if content_type == 'json':
+                return json.loads(content)
+            elif content_type == 'plist':
+                return plistlib.loads(content)
+            else:
+                return content.decode('utf-8')
 
-        if content_type == 'json':
-            return response.json()
-        elif content_type == 'plist':
-            return plistlib.loads(response.content)
-        else:
-            return response.text
 
-    def download_and_save_file(self, resource_url, destination_path, extract=True):
-        with requests.get(resource_url, headers=self.request_headers, stream=True, verify=self.ssl_context.verify_mode != ssl.CERT_NONE) as response:
-            response.raise_for_status()
-
-            try:
-                total_size = int(response.headers.get('Content-Length', -1))
-            except ValueError:
-                total_size = -1
-
+    def download_and_save_file(self, resource_url, destination_path):
+        request = Request(resource_url, headers=self.request_headers or {})
+        with urlopen(request, context=self.ssl_context) as response:
+            total_size = response.length
             bytes_downloaded = 0
 
             print("Download from {}".format(resource_url))
 
             with open(destination_path, 'wb') as file_writer:
                 while True:
-                    chunk = response.raw.read(self.buffer_size)
+                    chunk = response.read(self.buffer_size)
                     if not chunk:
                         break
                    file_writer.write(chunk)
                    bytes_downloaded += len(chunk)
-                    if total_size != -1:
-                        print("Downloaded {:.2f} MB of {:.2f} MB".format(bytes_downloaded / (1024 * 1024), total_size / (1024 * 1024)), end='\r')
+                    if total_size:
+                        print("Downloaded {:.2f} MB of {:.2f} MB".format(bytes_downloaded / (1024 * 1024), total_size / (1024 * 1024)), end='\r')
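
Note that http.client's response.length is None when the server sends no Content-Length header, which is why the progress guard changes from `total_size != -1` to `if total_size:`. The new download loop, condensed into a standalone sketch — the download() name, URL, and User-Agent header are assumptions:

    import ssl
    from urllib.request import urlopen, Request

    def download(url, destination_path, buffer_size=16 * 1024):
        request = Request(url, headers={"User-Agent": "Mozilla/5.0"})  # assumed headers
        with urlopen(request, context=ssl.create_default_context()) as response:
            total_size = response.length  # None if Content-Length is absent
            bytes_downloaded = 0
            with open(destination_path, "wb") as file_writer:
                while True:
                    chunk = response.read(buffer_size)  # stream in fixed-size chunks
                    if not chunk:
                        break
                    file_writer.write(chunk)
                    bytes_downloaded += len(chunk)
                    if total_size:
                        print("Downloaded {:.2f} MB of {:.2f} MB".format(
                            bytes_downloaded / (1024 * 1024),
                            total_size / (1024 * 1024)), end="\r")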
@@ -1,4 +0,0 @@
-bs4
-requests
-python-dotenv
-pytz