login page

Alicja Cięciwa
2020-10-27 12:57:58 +01:00
commit cb8886666c
8545 changed files with 1082463 additions and 0 deletions


@@ -0,0 +1,11 @@
MIT License
Copyright (c) 2017, Jannis Gebauer
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


@@ -0,0 +1,9 @@
# -*- coding: utf-8 -*-
"""Top-level package for Dependency Parser."""
from __future__ import unicode_literals, absolute_import

__author__ = """Jannis Gebauer"""
__email__ = 'jay@pyup.io'
__version__ = '0.5.0'

from .parser import parse
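
Not part of the committed files, but useful orientation: the package exposes parse() at the top level, so typical use looks like the sketch below (the requirement string is made up for illustration).

from dparse import parse, filetypes

dep_file = parse("requests==2.24.0\n", file_type=filetypes.requirements_txt)
for dep in dep_file.dependencies:
    print(dep.name, dep.specs)  # requests ==2.24.0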


@@ -0,0 +1,196 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import

import json

from . import filetypes, errors


class Dependency(object):
    """A single dependency, as found in a dependency file."""

    def __init__(self, name, specs, line, source="pypi", meta=None, extras=None,
                 line_numbers=None, index_server=None, hashes=(),
                 dependency_type=None, section=None):
        """
        :param name: str, package name
        :param specs: SpecifierSet, version specifiers
        :param line: str, the line the dependency was found on
        :param source: str, package index (defaults to "pypi")
        :param extras: extras requested for the package
        :param line_numbers: line numbers the dependency spans
        :param index_server: str, URL of the index server, if not the default
        :param hashes: iterable of --hash strings attached to the requirement
        :param dependency_type: str, the file type the dependency came from
        :param section: str, the file section (e.g. a Pipfile's "packages")
        """
        self.name = name
        self.key = name.lower().replace("_", "-")
        self.specs = specs
        self.line = line
        self.source = source
        # avoid mutable default arguments; fall back to fresh containers
        self.meta = meta if meta is not None else {}
        self.line_numbers = line_numbers
        self.index_server = index_server
        self.hashes = hashes
        self.dependency_type = dependency_type
        self.extras = extras if extras is not None else []
        self.section = section
    def __str__(self):  # pragma: no cover
        """
        :return: str, printable representation
        """
        return "Dependency({name}, {specs}, {line})".format(
            name=self.name,
            specs=self.specs,
            line=self.line
        )

    def serialize(self):
        """
        :return: dict, the dependency as a serializable dict
        """
        return {
            "name": self.name,
            "specs": self.specs,
            "line": self.line,
            "source": self.source,
            "meta": self.meta,
            "line_numbers": self.line_numbers,
            "index_server": self.index_server,
            "hashes": self.hashes,
            "dependency_type": self.dependency_type,
            "extras": self.extras,
            "section": self.section
        }

    @classmethod
    def deserialize(cls, d):
        """
        :param d: dict, as produced by serialize()
        :return: Dependency instance
        """
        return cls(**d)

    @property
    def full_name(self):
        """
        :return: str, the name including extras, e.g. "requests[security]"
        """
        if self.extras:
            return "{}[{}]".format(self.name, ",".join(self.extras))
        return self.name
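
An aside, not part of the file: a short sketch of how Dependency behaves, with made-up values.

from packaging.specifiers import SpecifierSet
from dparse.dependencies import Dependency

dep = Dependency(name="Django", specs=SpecifierSet("==2.2.1"),
                 line="Django==2.2.1", extras=["bcrypt"])
print(dep.key)        # django
print(dep.full_name)  # Django[bcrypt]

# serialize() and deserialize() round-trip through a plain dict
same = Dependency.deserialize(dep.serialize())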
class DependencyFile(object):
    """A dependency file (requirements.txt, Pipfile, tox.ini, ...) and its parsed dependencies."""

    def __init__(self, content, path=None, sha=None, file_type=None, marker=((), ()), parser=None):
        """
        :param content: str, the raw file content
        :param path: str, path to the file (used to guess the file type)
        :param sha: str, content hash
        :param marker: tuple of (file markers, line markers) that exclude files/lines
        :param file_type: str, one of the constants in filetypes
        :param parser: Parser class to use, overrides file_type/path detection
        """
        self.content = content
        self.file_type = file_type
        self.path = path
        self.sha = sha
        self.marker = marker
        self.dependencies = []
        self.resolved_files = []
        self.is_valid = False

        self.file_marker, self.line_marker = marker

        if parser:
            self.parser = parser
        else:
            from . import parser as parser_class
            if file_type is not None:
                if file_type == filetypes.requirements_txt:
                    self.parser = parser_class.RequirementsTXTParser
                elif file_type == filetypes.tox_ini:
                    self.parser = parser_class.ToxINIParser
                elif file_type == filetypes.conda_yml:
                    self.parser = parser_class.CondaYMLParser
                elif file_type == filetypes.pipfile:
                    self.parser = parser_class.PipfileParser
                elif file_type == filetypes.pipfile_lock:
                    self.parser = parser_class.PipfileLockParser
                elif file_type == filetypes.setup_cfg:
                    self.parser = parser_class.SetupCfgParser
            elif path is not None:
                if path.endswith(".txt"):
                    self.parser = parser_class.RequirementsTXTParser
                elif path.endswith(".yml"):
                    self.parser = parser_class.CondaYMLParser
                elif path.endswith(".ini"):
                    self.parser = parser_class.ToxINIParser
                elif path.endswith("Pipfile"):
                    self.parser = parser_class.PipfileParser
                elif path.endswith("Pipfile.lock"):
                    self.parser = parser_class.PipfileLockParser
                elif path.endswith("setup.cfg"):
                    self.parser = parser_class.SetupCfgParser

        if not hasattr(self, "parser"):
            raise errors.UnknownDependencyFileError

        # instantiate the parser with a back reference to this file
        self.parser = self.parser(self)
    def serialize(self):
        """
        :return: dict, the file as a serializable dict
        """
        return {
            "file_type": self.file_type,
            "content": self.content,
            "path": self.path,
            "sha": self.sha,
            "dependencies": [dep.serialize() for dep in self.dependencies]
        }

    @classmethod
    def deserialize(cls, d):
        """
        :param d: dict, as produced by serialize()
        :return: DependencyFile instance
        """
        dependencies = [Dependency.deserialize(dep) for dep in d.pop("dependencies", [])]
        instance = cls(**d)
        instance.dependencies = dependencies
        return instance

    def json(self):  # pragma: no cover
        """
        :return: str, the serialized file as indented JSON
        """
        return json.dumps(self.serialize(), indent=2)

    def parse(self):
        """
        Parses the file and populates self.dependencies.
        :return: self
        """
        if self.parser.is_marked_file:
            self.is_valid = False
            return self

        self.parser.parse()

        self.is_valid = len(self.dependencies) > 0 or len(self.resolved_files) > 0
        return self
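
Again as an aside: a minimal sketch of DependencyFile in use, assuming a hypothetical requirements file. The ".txt" suffix is enough for the parser selection above to pick RequirementsTXTParser.

from dparse.dependencies import DependencyFile

content = "# comment\nrequests==2.24.0\nDjango>=2.2,<3.0\n"
dep_file = DependencyFile(content=content, path="requirements.txt").parse()
print(dep_file.is_valid)                        # True
print([d.name for d in dep_file.dependencies])  # ['requests', 'Django']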


@@ -0,0 +1,8 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import


class UnknownDependencyFileError(Exception):
    """Raised when the type of a dependency file can't be determined."""
    pass


@@ -0,0 +1,9 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
requirements_txt = "requirements.txt"
conda_yml = "conda.yml"
setup_cfg = "setup.cfg"
tox_ini = "tox.ini"
pipfile = "Pipfile"
pipfile_lock = "Pipfile.lock"


@@ -0,0 +1,427 @@
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import

import json
import re
from collections import OrderedDict
from io import StringIO

import toml
import yaml
from packaging.requirements import Requirement as PackagingRequirement, InvalidRequirement
from packaging.specifiers import SpecifierSet
from six.moves.configparser import SafeConfigParser, NoOptionError

from . import filetypes
from .dependencies import DependencyFile, Dependency
from .regex import URL_REGEX, HASH_REGEX
# this is a backport from setuptools 26.1
def setuptools_parse_requirements_backport(strs):  # pragma: no cover
    # Copyright (C) 2016 Jason R Coombs <jaraco@jaraco.com>
    #
    # Permission is hereby granted, free of charge, to any person obtaining a copy of
    # this software and associated documentation files (the "Software"), to deal in
    # the Software without restriction, including without limitation the rights to
    # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
    # of the Software, and to permit persons to whom the Software is furnished to do
    # so, subject to the following conditions:
    #
    # The above copyright notice and this permission notice shall be included in all
    # copies or substantial portions of the Software.
    #
    # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
    # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
    # SOFTWARE.
    """Yield ``Requirement`` objects for each specification in `strs`.

    `strs` must be a string, or a (possibly-nested) iterable thereof.
    """
    # create a steppable iterator, so we can handle \-continuations
    def yield_lines(strs):
        """Yield non-empty/non-comment lines of a string or sequence"""
        if isinstance(strs, str):
            for s in strs.splitlines():
                s = s.strip()
                # skip blank lines/comments
                if s and not s.startswith('#'):
                    yield s
        else:
            for ss in strs:
                for s in yield_lines(ss):
                    yield s

    lines = iter(yield_lines(strs))

    for line in lines:
        # Drop comments -- a hash without a space may be in a URL.
        if ' #' in line:
            line = line[:line.find(' #')]
        # If there is a line continuation, drop it, and append the next line.
        if line.endswith('\\'):
            line = line[:-2].strip()
            line += next(lines)
        yield PackagingRequirement(line)
class RequirementsTXTLineParser(object):
    """Parses a single line in requirements.txt format."""

    @classmethod
    def parse(cls, line):
        """
        :param line: str, a single requirement line
        :return: Dependency, or None if the line is not a valid requirement
        """
        try:
            # setuptools requires a space before the comment. If this isn't the case, add it.
            if "\t#" in line:
                parsed, = setuptools_parse_requirements_backport(line.replace("\t#", "\t #"))
            else:
                parsed, = setuptools_parse_requirements_backport(line)
        except InvalidRequirement:
            return None
        dep = Dependency(
            name=parsed.name,
            specs=parsed.specifier,
            line=line,
            extras=parsed.extras,
            dependency_type=filetypes.requirements_txt
        )
        return dep
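
A quick illustration (not part of the file): valid lines come back as Dependency objects, anything else as None. The inputs are made up.

from dparse.parser import RequirementsTXTLineParser

req = RequirementsTXTLineParser.parse("requests[security]==2.8.1")
print(req.name, req.full_name)  # requests requests[security]

print(RequirementsTXTLineParser.parse("-e ."))  # None, not a valid requirement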
class Parser(object):
    """Base class for all file parsers."""

    def __init__(self, obj):
        """
        :param obj: DependencyFile, the file to parse
        """
        self.obj = obj
        self._lines = None

    def iter_lines(self, lineno=0):
        """
        :param lineno: int, line number to start at
        :return: generator over the file's lines
        """
        for line in self.lines[lineno:]:
            yield line

    @property
    def lines(self):
        """
        :return: list of str, the file content split into lines
        """
        if self._lines is None:
            self._lines = self.obj.content.splitlines()
        return self._lines

    @property
    def is_marked_file(self):
        """
        :return: bool, True if a file marker appears in the first three lines
        """
        for n, line in enumerate(self.iter_lines()):
            for marker in self.obj.file_marker:
                if marker in line:
                    return True
            if n >= 2:
                break
        return False
    def is_marked_line(self, line):
        """
        :param line: str
        :return: bool, True if a line marker appears in the line
        """
        for marker in self.obj.line_marker:
            if marker in line:
                return True
        return False

    @classmethod
    def parse_hashes(cls, line):
        """
        :param line: str
        :return: tuple of (line with hashes stripped, list of --hash strings)
        """
        hashes = []
        for match in re.finditer(HASH_REGEX, line):
            hashes.append(line[match.start():match.end()])
        return re.sub(HASH_REGEX, "", line).strip(), hashes

    @classmethod
    def parse_index_server(cls, line):
        """
        :param line: str, an -i/--index-url/--extra-index-url line
        :return: str, the index server URL with a trailing slash, or None
        """
        matches = URL_REGEX.findall(line)
        if matches:
            url = matches[0]
            return url if url.endswith("/") else url + "/"
        return None

    @classmethod
    def resolve_file(cls, file_path, line):
        """
        :param file_path: str, path of the file containing the -r line
        :param line: str, a -r/--requirement line
        :return: str, path of the referenced file, relative to file_path
        """
        line = line.replace("-r ", "").replace("--requirement ", "")
        parts = file_path.split("/")
        if " #" in line:
            line = line.split("#")[0].strip()
        if len(parts) == 1:
            return line
        return "/".join(parts[:-1]) + "/" + line
class RequirementsTXTParser(Parser):
    """Parses requirements.txt-like files."""

    def parse(self):
        """
        Parses a requirements.txt-like file.
        """
        index_server = None
        for num, line in enumerate(self.iter_lines()):
            line = line.rstrip()
            if not line:
                continue
            if line.startswith('#'):
                # comments are lines that start with # only
                continue
            if line.startswith('-i') or \
                    line.startswith('--index-url') or \
                    line.startswith('--extra-index-url'):
                # this file is using a private index server, try to parse it
                index_server = self.parse_index_server(line)
                continue
            elif self.obj.path and (line.startswith('-r') or line.startswith('--requirement')):
                self.obj.resolved_files.append(self.resolve_file(self.obj.path, line))
            elif line.startswith('-f') or line.startswith('--find-links') or \
                    line.startswith('--no-index') or line.startswith('--allow-external') or \
                    line.startswith('--allow-unverified') or line.startswith('-Z') or \
                    line.startswith('--always-unzip'):
                continue
            elif self.is_marked_line(line):
                continue
            else:
                try:
                    parseable_line = line
                    # multiline requirements are not parseable
                    if "\\" in line:
                        parseable_line = line.replace("\\", "")
                        for next_line in self.iter_lines(num + 1):
                            parseable_line += next_line.strip().replace("\\", "")
                            line += "\n" + next_line
                            if "\\" in next_line:
                                continue
                            break
                    # ignore multiline requirements if they are marked
                    if self.is_marked_line(parseable_line):
                        continue
                    hashes = []
                    if "--hash" in parseable_line:
                        parseable_line, hashes = Parser.parse_hashes(parseable_line)
                    req = RequirementsTXTLineParser.parse(parseable_line)
                    if req:
                        req.hashes = hashes
                        req.index_server = index_server
                        # replace the requirements line with the 'real' line
                        req.line = line
                        self.obj.dependencies.append(req)
                except ValueError:
                    continue
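
To see the index-server and multiline handling together, a sketch with invented content (URL and hashes are placeholders); the continuation lines get condensed into one parseable line, while dep.line keeps the original physical lines:

from dparse import parse, filetypes

content = (
    "-i https://pypi.example.org/simple\n"
    "requests==2.24.0 \\\n"
    "    --hash=sha256:aaaa \\\n"
    "    --hash=sha256:bbbb\n"
)
dep = parse(content, file_type=filetypes.requirements_txt).dependencies[0]
print(dep.index_server)  # https://pypi.example.org/simple/
print(dep.hashes)        # ['--hash=sha256:aaaa', '--hash=sha256:bbbb']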
class ToxINIParser(Parser):
    """Parses tox.ini files, looking at each section's "deps" option."""

    def parse(self):
        """
        Collects a Dependency for every "deps" line in every section.
        """
        parser = SafeConfigParser()
        parser.readfp(StringIO(self.obj.content))
        for section in parser.sections():
            try:
                content = parser.get(section=section, option="deps")
                for n, line in enumerate(content.splitlines()):
                    if self.is_marked_line(line):
                        continue
                    if line:
                        req = RequirementsTXTLineParser.parse(line)
                        if req:
                            req.dependency_type = self.obj.file_type
                            self.obj.dependencies.append(req)
            except NoOptionError:
                pass
class CondaYMLParser(Parser):
    """Parses conda environment files, looking at the nested "pip" block."""

    def parse(self):
        """
        Collects a Dependency for every pip requirement in the dependencies list.
        """
        try:
            data = yaml.safe_load(self.obj.content)
            if data and 'dependencies' in data and isinstance(data['dependencies'], list):
                for dep in data['dependencies']:
                    if isinstance(dep, dict) and 'pip' in dep:
                        for n, line in enumerate(dep['pip']):
                            if self.is_marked_line(line):
                                continue
                            req = RequirementsTXTLineParser.parse(line)
                            if req:
                                req.dependency_type = self.obj.file_type
                                self.obj.dependencies.append(req)
        except yaml.YAMLError:
            pass
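
A sketch with a made-up conda environment file: only entries under a pip: block are collected, conda-native specs like python=3.8 are ignored.

from dparse import parse, filetypes

content = """
dependencies:
  - python=3.8
  - pip:
    - requests==2.24.0
"""
dep_file = parse(content, file_type=filetypes.conda_yml)
print([d.name for d in dep_file.dependencies])  # ['requests']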
class PipfileParser(Parser):

    def parse(self):
        """
        Parse a Pipfile (as seen in pipenv).
        """
        try:
            data = toml.loads(self.obj.content, _dict=OrderedDict)
            if data:
                for package_type in ['packages', 'dev-packages']:
                    if package_type in data:
                        for name, specs in data[package_type].items():
                            # skip VCS dependencies
                            if not isinstance(specs, str):
                                continue
                            if specs == '*':
                                specs = ''
                            self.obj.dependencies.append(
                                Dependency(
                                    name=name, specs=SpecifierSet(specs),
                                    dependency_type=filetypes.pipfile,
                                    line=''.join([name, specs]),
                                    section=package_type
                                )
                            )
        except (toml.TomlDecodeError, IndexError):
            pass
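
A sketch with a made-up Pipfile: the "*" spec becomes an empty SpecifierSet, and each dependency remembers its section.

from dparse import parse, filetypes

content = """
[packages]
requests = "==2.24.0"
django = "*"

[dev-packages]
pytest = ">=5.0"
"""
dep_file = parse(content, file_type=filetypes.pipfile)
for dep in dep_file.dependencies:
    print(dep.name, str(dep.specs), dep.section)
# requests ==2.24.0 packages
# django  packages
# pytest >=5.0 dev-packages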
class PipfileLockParser(Parser):

    def parse(self):
        """
        Parse a Pipfile.lock (as seen in pipenv).
        """
        try:
            data = json.loads(self.obj.content, object_pairs_hook=OrderedDict)
            if data:
                for package_type in ['default', 'develop']:
                    if package_type in data:
                        for name, meta in data[package_type].items():
                            # skip VCS dependencies
                            if 'version' not in meta:
                                continue
                            specs = meta['version']
                            hashes = meta['hashes']
                            self.obj.dependencies.append(
                                Dependency(
                                    name=name, specs=SpecifierSet(specs),
                                    dependency_type=filetypes.pipfile_lock,
                                    hashes=hashes,
                                    line=''.join([name, specs]),
                                    section=package_type
                                )
                            )
        except ValueError:
            pass
class SetupCfgParser(Parser):

    def parse(self):
        parser = SafeConfigParser()
        parser.readfp(StringIO(self.obj.content))
        for section in parser.values():
            if section.name == 'options':
                options = 'install_requires', 'setup_requires', 'tests_require'
                for name in options:
                    content = section.get(name)
                    if not content:
                        continue
                    self._parse_content(content)
            elif section.name == 'options.extras_require':
                for content in section.values():
                    self._parse_content(content)

    def _parse_content(self, content):
        for n, line in enumerate(content.splitlines()):
            if self.is_marked_line(line):
                continue
            if line:
                req = RequirementsTXTLineParser.parse(line)
                if req:
                    req.dependency_type = self.obj.file_type
                    self.obj.dependencies.append(req)
def parse(content, file_type=None, path=None, sha=None, marker=((), ()), parser=None):
    """
    Parses the given content and returns a DependencyFile instance.

    :param content: str, file content
    :param file_type: str, one of the constants in filetypes
    :param path: str, path to the file (used to guess the file type)
    :param sha: str, content hash
    :param marker: tuple of (file markers, line markers)
    :param parser: Parser class, overrides file_type/path detection
    :return: DependencyFile, with dependencies parsed
    """
    dep_file = DependencyFile(
        content=content,
        path=path,
        sha=sha,
        marker=marker,
        file_type=file_type,
        parser=parser
    )
    return dep_file.parse()
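
Note that file_type is optional when the path is recognizable, and that an undeterminable file raises the error class defined earlier; a sketch:

from dparse import parse
from dparse.errors import UnknownDependencyFileError

# the file type is inferred from the ".txt" suffix here
dep_file = parse("requests==2.24.0\n", path="requirements/base.txt")

try:
    parse("requests==2.24.0\n")  # neither file_type nor a recognizable path
except UnknownDependencyFileError:
    pass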


@@ -0,0 +1,39 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import re
# see https://gist.github.com/dperini/729294
URL_REGEX = re.compile(
    # protocol identifier
    "(?:(?:https?|ftp)://)"
    # user:pass authentication
    "(?:\S+(?::\S*)?@)?"
    "(?:"
    # IP address exclusion
    # private & local networks
    "(?!(?:10|127)(?:\.\d{1,3}){3})"
    "(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})"
    "(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})"
    # IP address dotted notation octets
    # excludes loopback network 0.0.0.0
    # excludes reserved space >= 224.0.0.0
    # excludes network & broadcast addresses
    # (first & last IP address of each class)
    "(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])"
    "(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}"
    "(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))"
    "|"
    # host name
    "(?:(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)"
    # domain name
    "(?:\.(?:[a-z\u00a1-\uffff0-9]-?)*[a-z\u00a1-\uffff0-9]+)*"
    # TLD identifier
    "(?:\.(?:[a-z\u00a1-\uffff]{2,}))"
    ")"
    # port number
    "(?::\d{2,5})?"
    # resource path
    "(?:/\S*)?",
    re.UNICODE)

# matches "--hash=<algo>:<digest>" and "--hash <algo>:<digest>"
HASH_REGEX = r"--hash[= ]\w+:\w+"
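
Both patterns are easy to try in isolation; a sketch with a made-up index URL and digest:

import re
from dparse.regex import URL_REGEX, HASH_REGEX

print(URL_REGEX.findall("-i https://pypi.example.org/simple/"))
# ['https://pypi.example.org/simple/']

print(re.findall(HASH_REGEX, "foo==1.0 --hash=sha256:deadbeef"))
# ['--hash=sha256:deadbeef']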


@@ -0,0 +1,119 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals

import json
import os
import re
import tempfile

import toml
class RequirementsTXTUpdater(object):
    SUB_REGEX = r"^{}(?=\s*\r?\n?$)"

    @classmethod
    def update(cls, content, dependency, version, spec="==", hashes=()):
        """
        Updates the requirement in the given content to the given version
        and adds hashes if necessary.

        :param content: str, the file content
        :param dependency: Dependency, the dependency to update
        :param version: str, the new version
        :param spec: str, the specifier to pin with (defaults to "==")
        :param hashes: iterable of {"method": ..., "hash": ...} dicts
        :return: str, updated content
        """
        new_line = "{name}{spec}{version}".format(name=dependency.full_name,
                                                  spec=spec, version=version)
        appendix = ''
        # leave environment markers intact
        if ";" in dependency.line:
            # condense multiline, split out the env marker, strip comments and --hashes
            new_line += ";" + dependency.line.splitlines()[0].split(";", 1)[1] \
                .split("#")[0].split("--hash")[0].rstrip()
        # add the comment
        if "#" in dependency.line:
            # split the line into parts: requirement and comment
            parts = dependency.line.split("#")
            requirement, comment = parts[0], "#".join(parts[1:])
            # find all whitespace between the requirement and the comment
            trailing_whitespace = ''
            for c in requirement[::-1]:
                if c in ('\t', ' '):
                    trailing_whitespace += c
                else:
                    break
            appendix += trailing_whitespace + "#" + comment
        # if this is a hashed requirement, add a multiline break before the comment
        if dependency.hashes and not new_line.endswith("\\"):
            new_line += " \\"
        # if this is a hashed requirement, add the hashes
        if hashes:
            for n, new_hash in enumerate(hashes):
                new_line += "\n --hash={method}:{hash}".format(
                    method=new_hash['method'],
                    hash=new_hash['hash']
                )
                # append a new multiline break if this is not the last line
                if len(hashes) > n + 1:
                    new_line += " \\"
        new_line += appendix
        regex = cls.SUB_REGEX.format(re.escape(dependency.line))
        return re.sub(regex, new_line, content, flags=re.MULTILINE)
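
A sketch of the updater in action, assuming the module lives at dparse.updater (the requirement and comment are made up); note how the comment and its spacing survive the rewrite:

from dparse import parse
from dparse.updater import RequirementsTXTUpdater

content = "requests==2.23.0  # http library\n"
dep = parse(content, path="requirements.txt").dependencies[0]
print(RequirementsTXTUpdater.update(content, dep, "2.24.0"))
# requests==2.24.0  # http library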
class CondaYMLUpdater(RequirementsTXTUpdater):
    SUB_REGEX = r"{}(?=\s*\r?\n?$)"


class ToxINIUpdater(CondaYMLUpdater):
    pass


class SetupCFGUpdater(CondaYMLUpdater):
    pass
class PipfileUpdater(object):

    @classmethod
    def update(cls, content, dependency, version, spec="==", hashes=()):
        data = toml.loads(content)
        if data:
            for package_type in ['packages', 'dev-packages']:
                if package_type in data:
                    if dependency.full_name in data[package_type]:
                        data[package_type][dependency.full_name] = "{spec}{version}".format(
                            spec=spec, version=version
                        )
        try:
            from pipenv.project import Project
        except ImportError:
            raise ImportError("Updating a Pipfile requires the pipenv extra to be installed. Install it with "
                              "pip install dparse[pipenv]")
        # let pipenv serialize the TOML through a temporary file
        pipfile = tempfile.NamedTemporaryFile(delete=False)
        pipfile.close()
        p = Project(chdir=False)
        p.write_toml(data=data, path=pipfile.name)
        with open(pipfile.name) as f:
            data = f.read()
        os.remove(pipfile.name)
        return data
class PipfileLockUpdater(object):

    @classmethod
    def update(cls, content, dependency, version, spec="==", hashes=()):
        data = json.loads(content)
        if data:
            for package_type in ['default', 'develop']:
                if package_type in data:
                    if dependency.full_name in data[package_type]:
                        data[package_type][dependency.full_name] = {
                            'hashes': [
                                "{method}:{hash}".format(
                                    hash=h['hash'],
                                    method=h['method']
                                ) for h in hashes
                            ],
                            'version': "{spec}{version}".format(
                                spec=spec, version=version
                            )
                        }
        return json.dumps(data, indent=4, separators=(',', ': ')) + "\n"