|
| 1 | +--- a/Onboard/Appearance.py |
| 2 | ++++ b/Onboard/Appearance.py |
| 3 | +@@ -921,7 +921,7 @@ class ColorScheme(object): |
| 4 | + ColorScheme._parse_dom_node_item(node, item) |
| 5 | + return item |
| 6 | + |
| 7 | +- _key_ids_pattern = re.compile('[\w-]+(?:[.][\w-]+)?', re.UNICODE) |
| 8 | ++ _key_ids_pattern = re.compile(r'[\w-]+(?:[.][\w-]+)?', re.UNICODE) |
| 9 | + |
| 10 | + @staticmethod |
| 11 | + def _parse_key_group(node, used_keys): |
| 12 | +@@ -1063,7 +1063,7 @@ class ColorScheme(object): |
| 13 | + |
| 14 | + # read key ids |
| 15 | + text = "".join([n.data for n in group.childNodes]) |
| 16 | +- key_ids = [x for x in re.findall('\w+(?:[.][\w-]+)?', text) if x] |
| 17 | ++ key_ids = [x for x in re.findall(r'\w+(?:[.][\w-]+)?', text) if x] |
| 18 | + |
| 19 | + # check for duplicate key definitions |
| 20 | + for key_id in key_ids: |
| 21 | +--- a/Onboard/LayoutLoaderSVG.py |
| 22 | ++++ b/Onboard/LayoutLoaderSVG.py |
| 23 | +@@ -95,7 +95,7 @@ class LayoutLoaderSVG: |
| 24 | + self._layout_filename = "" |
| 25 | + self._color_scheme = None |
| 26 | + self._root_layout_dir = "" # path to svg files |
| 27 | +- self._layout_regex = re.compile("([^\(]+) (?: \( ([^\)]*) \) )?", |
| 28 | ++ self._layout_regex = re.compile(r"([^\(]+) (?: \( ([^\)]*) \) )?", |
| 29 | + re.VERBOSE) |
| 30 | + |
| 31 | + def load(self, vk, layout_filename, color_scheme): |
| 32 | +--- a/Onboard/SpellChecker.py |
| 33 | ++++ b/Onboard/SpellChecker.py |
| 34 | +@@ -321,7 +321,7 @@ class hunspell(SCBackend): |
| 35 | + def is_running(self): |
| 36 | + return not self._osk_hunspell is None |
| 37 | + |
| 38 | +- SPLITWORDS = re.compile("[^-_\s]+", re.UNICODE|re.DOTALL) |
| 39 | ++ SPLITWORDS = re.compile(r"[^-_\s]+", re.UNICODE|re.DOTALL) |
| 40 | + |
| 41 | + def query(self, text): |
| 42 | + """ |
| 43 | +--- a/Onboard/TextDomain.py |
| 44 | ++++ b/Onboard/TextDomain.py |
| 45 | +@@ -141,7 +141,7 @@ class TextDomain: |
| 46 | + |
| 47 | + # Split at whitespace to catch whole URLs/file names and |
| 48 | + # keep separators. |
| 49 | +- strings = re.split('(\s+)', context) |
| 50 | ++ strings = re.split(r'(\s+)', context) |
| 51 | + if strings: |
| 52 | + string = strings[-1] |
| 53 | + if self._url_parser.is_maybe_url(string): |
| 54 | +@@ -158,7 +158,7 @@ class TextDomain: |
| 55 | + |
| 56 | + |
| 57 | + def _search_valid_file_name(self, strings): |
| 58 | +- """ |
| 59 | ++ r""" |
| 60 | + Search for a valid filename backwards across separators. |
| 61 | + |
| 62 | + Doctests: |
| 63 | +@@ -174,17 +174,17 @@ class TextDomain: |
| 64 | + >>> with open(fn2, mode="w") as f: n = f.write("") |
| 65 | + |
| 66 | + # simple file in dir with spaces must return as filename |
| 67 | +- >>> strings = re.split('(\s+)', fn1) |
| 68 | ++ >>> strings = re.split(r'(\s+)', fn1) |
| 69 | + >>> "/test onboard" in d._search_valid_file_name(strings) |
| 70 | + True |
| 71 | + |
| 72 | + # file with spaces in dir with spaces must return as filename |
| 73 | +- >>> strings = re.split('(\s+)', fn2) |
| 74 | ++ >>> strings = re.split(r'(\s+)', fn2) |
| 75 | + >>> "/test onboard" in d._search_valid_file_name(strings) |
| 76 | + True |
| 77 | + |
| 78 | + # random string after a valid file must not be confused with a filename |
| 79 | +- >>> strings = re.split('(\s+)', fn2 + " no-file") |
| 80 | ++ >>> strings = re.split(r'(\s+)', fn2 + " no-file") |
| 81 | + >>> d._search_valid_file_name(strings) is None |
| 82 | + True |
| 83 | + """ |
| 84 | +@@ -288,7 +288,7 @@ class TextDomain: |
| 85 | + def handle_key_press(self, keycode, mod_mask): |
| 86 | + return True, None # entering_text, end_of_editing |
| 87 | + |
| 88 | +- _growth_sections_pattern = re.compile("[^\s?#@]+", re.DOTALL) |
| 89 | ++ _growth_sections_pattern = re.compile(r"[^\s?#@]+", re.DOTALL) |
| 90 | + |
| 91 | + def _split_growth_sections(self, text): |
| 92 | + """ |
| 93 | +@@ -444,11 +444,11 @@ class DomainTerminal(TextDomain): |
| 94 | + ( |
| 95 | + "^gdb$ ", |
| 96 | + "^>>> ", # python |
| 97 | +- "^In \[[0-9]*\]: ", # ipython |
| 98 | ++ r"^In \[[0-9]*\]: ", # ipython |
| 99 | + "^:", # vi command mode |
| 100 | + "^/", # vi search |
| 101 | +- "^\?", # vi reverse search |
| 102 | +- "\$ ", # generic prompt |
| 103 | ++ r"^\?", # vi reverse search |
| 104 | ++ r"\$ ", # generic prompt |
| 105 | + "# ", # root prompt |
| 106 | + "^.*?@.*?/.*?> " # fish |
| 107 | + ) |
| 108 | +@@ -456,7 +456,7 @@ class DomainTerminal(TextDomain): |
| 109 | + |
| 110 | + _prompt_blacklist_patterns = tuple(re.compile(p, re.UNICODE) for p in |
| 111 | + ( |
| 112 | +- "^\(.*\)`.*': ", # bash incremental search |
| 113 | ++ r"^\(.*\)`.*': ", # bash incremental search |
| 114 | + ) |
| 115 | + ) |
| 116 | + |
| 117 | +@@ -736,7 +736,7 @@ class PartialURLParser: |
| 118 | + _protocols = ["mailto", "apt"] |
| 119 | + _all_schemes = _schemes + _protocols |
| 120 | + |
| 121 | +- _url_pattern = re.compile("([\w-]+)|(\W+)", re.UNICODE) |
| 122 | ++ _url_pattern = re.compile(r"([\w-]+)|(\W+)", re.UNICODE) |
| 123 | + |
| 124 | + def iter_url(self, url): |
| 125 | + return self._url_pattern.finditer(url) |
| 126 | +--- a/Onboard/WordSuggestions.py |
| 127 | ++++ b/Onboard/WordSuggestions.py |
| 128 | +@@ -1250,8 +1250,8 @@ class WordSuggestions: |
| 129 | + return word_span |
| 130 | + return None |
| 131 | + |
| 132 | +- _section_begin_pattern = re.compile("\S*\s*$") |
| 133 | +- _section_end_pattern = re.compile("\S*(?=\s*)") |
| 134 | ++ _section_begin_pattern = re.compile(r"\S*\s*$") |
| 135 | ++ _section_end_pattern = re.compile(r"\S*(?=\s*)") |
| 136 | + |
| 137 | + def _get_section_before_span(self, insertion_span): |
| 138 | + """ |
| 139 | +--- a/Onboard/pypredict/lm_wrapper.py |
| 140 | ++++ b/Onboard/pypredict/lm_wrapper.py |
| 141 | +@@ -299,7 +299,7 @@ def split_tokens_at(tokens, split_indice |
| 142 | + |
| 143 | + |
| 144 | + SENTENCE_PATTERN = re.compile( \ |
| 145 | +- """ .*? |
| 146 | ++ r""" .*? |
| 147 | + (?: |
| 148 | + (?:[.;:!?](?:(?=[\s]) | \")) # punctuation |
| 149 | + | (?:\\s*\\n\\s*)+(?=[\\n]) # multiples newlines |
| 150 | +@@ -365,7 +365,7 @@ def split_sentences(text, disambiguate=F |
| 151 | + return sentences, spans |
| 152 | + |
| 153 | + |
| 154 | +-tokenize_pattern = """ |
| 155 | ++tokenize_pattern = r""" |
| 156 | + ( # <unk> |
| 157 | + (?:^|(?<=\s)) |
| 158 | +- \S*(\S)\\2{{3,}}\S* # char repeated more than 3 times |
| 158 | ++ \S*(\S)\2{{3,}}\S* # char repeated more than 3 times |
| 159 | +@@ -464,7 +464,7 @@ def tokenize_context(text): |
| 160 | + The result is ready for use in predict(). |
| 161 | + """ |
| 162 | + tokens, spans = tokenize_text(text, is_context = True) |
| 163 | +- if not re.match(""" |
| 164 | ++ if not re.match(r""" |
| 165 | + ^$ # empty string? |
| 166 | + | .*[-'´΄\w]$ # word at the end? |
| 167 | + | (?:^|.*\s)[|]=?$ # recognized operator? |
| 168 | +@@ -501,7 +501,7 @@ def read_order(filename, encoding=None): |
| 169 | + continue |
| 170 | + |
| 171 | + if data: # data section? |
| 172 | +- result = re.search("ngram (\d+)=\d+", line) |
| 173 | ++ result = re.search(r"ngram (\d+)=\d+", line) |
| 174 | + if result: |
| 175 | + if order is None: |
| 176 | + order = 0 |
| 177 | +@@ -621,7 +621,7 @@ def simulate_typing(query_model, learn_m |
| 178 | + context, spans = tokenize_context(". " + inputline) # simulate sentence begin |
| 179 | + prefix = context[len(context)-1] if context else "" |
| 180 | + prefix_to_end = sentence[len(inputline)-len(prefix):] |
| 181 | +- target_word = re.search("^([\w]|[-'])*", prefix_to_end, re.UNICODE).group() |
| 182 | ++ target_word = re.search(r"^([\w]|[-'])*", prefix_to_end, re.UNICODE).group() |
| 183 | + choices = query_model.predict(context, limit) |
| 184 | + |
| 185 | + if 0: # step mode for debugging |
| 186 | +--- a/Onboard/utils.py |
| 187 | ++++ b/Onboard/utils.py |
| 188 | +@@ -148,7 +148,7 @@ def get_keysym_from_name(name): |
| 189 | + return keysyms[name] |
| 190 | + |
| 191 | + def parse_key_combination(combo, avaliable_key_ids = None): |
| 192 | +- """ |
| 193 | ++ r""" |
| 194 | + Parses a key combination into a list of modifier masks and key_ids. |
| 195 | + The key-id part of the combo may contain a regex pattern. |
| 196 | + |
| 197 | +@@ -169,7 +169,7 @@ def parse_key_combination(combo, avaliab |
| 198 | + [('TAB', 5)] |
| 199 | + |
| 200 | + # regex |
| 201 | +- >>> parse_key_combination(["F\d+"], ["TAB", "F1", "F2", "F3", "F9"]) |
| 202 | ++ >>> parse_key_combination([r"F\d+"], ["TAB", "F1", "F2", "F3", "F9"]) |
| 203 | + [('F1', 0), ('F2', 0), ('F3', 0), ('F9', 0)] |
| 204 | + """ |
| 205 | + modifiers = combo[:-1] |
| 206 | +@@ -217,8 +217,8 @@ def run_script(script): |
| 207 | + def toprettyxml(domdoc): |
| 208 | + ugly_xml = domdoc.toprettyxml(indent=' ') |
| 209 | + # Join lines with text elements with their tag lines |
| 210 | +- pattern = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) |
| 211 | +- pretty_xml = pattern.sub('>\g<1></', ugly_xml) |
| 212 | ++ pattern = re.compile(r'>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) |
| 213 | ++ pretty_xml = pattern.sub(r'>\g<1></', ugly_xml) |
| 214 | + |
| 215 | + # Work around http://bugs.python.org/issue5752 |
| 216 | + pretty_xml = re.sub( |
| 217 | +@@ -353,7 +353,7 @@ class dictproperty(object): |
| 218 | + return self._proxy(obj, self._fget, self._fset, self._fdel) |
| 219 | + |
| 220 | + def unpack_name_value_list(_list, num_values=2, key_type = str): |
| 221 | +- """ |
| 222 | ++ r""" |
| 223 | + Converts a list of strings into a dict of tuples. |
| 224 | + Sample list: ['LWIN:label:super', ...] |
| 225 | + ":" in a value must be escaped as "\:" |
| 226 | +@@ -1539,7 +1539,7 @@ class XDGDirs: |
| 227 | + |
| 228 | + |
| 229 | + _tag_pattern = re.compile( |
| 230 | +- """(?: |
| 231 | ++ r"""(?: |
| 232 | + <[\w\-_]+ # tag |
| 233 | + (?:\s+[\w\-_]+=["'][^"']*["'])* # attributes |
| 234 | + /?> |
| 235 | +--- a/setup.py |
| 236 | ++++ b/setup.py |
| 237 | +@@ -115,7 +115,7 @@ def get_pkg_version(package): |
| 238 | + .format(repr(package), status), file=sys.stderr) |
| 239 | + sys.exit(2) |
| 240 | + |
| 241 | +- version = re.search('(?:(?:\d+)\.)+\d+', output).group() |
| 242 | ++ version = re.search(r'(?:(?:\d+)\.)+\d+', output).group() |
| 243 | + components = version.split(".") |
| 244 | + major, minor = int(components[0]), int(components[1]) |
| 245 | + revision = int(components[2]) if len(components) >= 3 else 0 |
0 commit comments