text.py 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524
  1. import gzip
  2. import re
  3. import secrets
  4. import textwrap
  5. import unicodedata
  6. from gzip import GzipFile
  7. from gzip import compress as gzip_compress
  8. from io import BytesIO
  9. from django.core.exceptions import SuspiciousFileOperation
  10. from django.utils.functional import SimpleLazyObject, keep_lazy_text, lazy
  11. from django.utils.regex_helper import _lazy_re_compile
  12. from django.utils.translation import gettext as _
  13. from django.utils.translation import gettext_lazy, pgettext
  14. @keep_lazy_text
  15. def capfirst(x):
  16. """Capitalize the first letter of a string."""
  17. if not x:
  18. return x
  19. if not isinstance(x, str):
  20. x = str(x)
  21. return x[0].upper() + x[1:]
# ----- Begin security-related performance workaround -----
# We used to have, below
#
#   re_words = _lazy_re_compile(r"<[^>]+?>|([^<>\s]+)", re.S)
#
# But it was shown that this regex, in the way we use it here, has some
# catastrophic edge-case performance features. Namely, when it is applied to
# text with only open brackets "<<<...". The class below provides the services
# and correct answers for the use cases, but in these edge cases does it much
# faster.
re_notag = _lazy_re_compile(r"([^<>\s]+)", re.S)  # a run of non-tag characters
re_prt = _lazy_re_compile(r"<|([^<>\s]+)", re.S)  # a lone "<" OR a non-tag run
  34. class WordsRegex:
  35. @staticmethod
  36. def search(text, pos):
  37. # Look for "<" or a non-tag word.
  38. partial = re_prt.search(text, pos)
  39. if partial is None or partial[1] is not None:
  40. return partial
  41. # "<" was found, look for a closing ">".
  42. end = text.find(">", partial.end(0))
  43. if end < 0:
  44. # ">" cannot be found, look for a word.
  45. return re_notag.search(text, pos + 1)
  46. else:
  47. # "<" followed by a ">" was found -- fake a match.
  48. end += 1
  49. return FakeMatch(text[partial.start(0) : end], end)
  50. class FakeMatch:
  51. __slots__ = ["_text", "_end"]
  52. def end(self, group=0):
  53. assert group == 0, "This specific object takes only group=0"
  54. return self._end
  55. def __getitem__(self, group):
  56. if group == 1:
  57. return None
  58. assert group == 0, "This specific object takes only group in {0,1}"
  59. return self._text
  60. def __init__(self, text, end):
  61. self._text, self._end = text, end
# ----- End security-related performance workaround -----

# Set up regular expressions.
re_words = WordsRegex  # behaves like the old re_words pattern (workaround above)
re_chars = _lazy_re_compile(r"<[^>]+?>|(.)", re.S)  # a tag, or any single char
re_tag = _lazy_re_compile(r"<(/)?(\S+?)(?:(\s*/)|\s.*?)?>", re.S)  # parse one tag
re_newlines = _lazy_re_compile(r"\r\n|\r")  # Used in normalize_newlines
re_camel_case = _lazy_re_compile(r"(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))")
  69. @keep_lazy_text
  70. def wrap(text, width):
  71. """
  72. A word-wrap function that preserves existing line breaks. Expects that
  73. existing line breaks are posix newlines.
  74. Preserve all white space except added line breaks consume the space on
  75. which they break the line.
  76. Don't wrap long words, thus the output text may have lines longer than
  77. ``width``.
  78. """
  79. wrapper = textwrap.TextWrapper(
  80. width=width,
  81. break_long_words=False,
  82. break_on_hyphens=False,
  83. replace_whitespace=False,
  84. )
  85. result = []
  86. for line in text.splitlines():
  87. wrapped = wrapper.wrap(line)
  88. if not wrapped:
  89. # If `line` contains only whitespaces that are dropped, restore it.
  90. result.append(line)
  91. else:
  92. result.extend(wrapped)
  93. if text.endswith("\n"):
  94. # If `text` ends with a newline, preserve it.
  95. result.append("")
  96. return "\n".join(result)
class Truncator(SimpleLazyObject):
    """
    An object used to truncate text, either by characters or words.

    When truncating HTML text (either chars or words), input will be limited to
    at most `MAX_LENGTH_HTML` characters.
    """

    # 5 million characters are approximately 4000 text pages or 3 web pages.
    MAX_LENGTH_HTML = 5_000_000

    def __init__(self, text):
        # Defer the str() conversion; SimpleLazyObject stores the result in
        # self._wrapped the first time _setup() runs.
        super().__init__(lambda: str(text))

    def add_truncation_text(self, text, truncate=None):
        """
        Combine ``text`` with the truncation indicator ``truncate``,
        defaulting to a translatable "…" suffix.
        """
        if truncate is None:
            truncate = pgettext(
                "String to return when truncating text", "%(truncated_text)s…"
            )
        if "%(truncated_text)s" in truncate:
            return truncate % {"truncated_text": text}
        # The truncation text didn't contain the %(truncated_text)s string
        # replacement argument so just append it to the text.
        if text.endswith(truncate):
            # But don't append the truncation text if the current text already
            # ends in this.
            return text
        return "%s%s" % (text, truncate)

    def chars(self, num, truncate=None, html=False):
        """
        Return the text truncated to be no longer than the specified number
        of characters.

        `truncate` specifies what should be used to notify that the string has
        been truncated, defaulting to a translatable string of an ellipsis.
        """
        self._setup()
        length = int(num)
        # NFC-normalize so a base char plus combining marks counts once.
        text = unicodedata.normalize("NFC", self._wrapped)

        # Calculate the length to truncate to (max length - end_text length).
        truncate_len = length
        for char in self.add_truncation_text("", truncate):
            if not unicodedata.combining(char):
                truncate_len -= 1
                if truncate_len == 0:
                    break
        if html:
            return self._truncate_html(length, truncate, text, truncate_len, False)
        return self._text_chars(length, truncate, text, truncate_len)

    def _text_chars(self, length, truncate, text, truncate_len):
        """Truncate a string after a certain number of chars."""
        s_len = 0
        end_index = None
        for i, char in enumerate(text):
            if unicodedata.combining(char):
                # Don't consider combining characters
                # as adding to the string length.
                continue
            s_len += 1
            if end_index is None and s_len > truncate_len:
                # First position past the truncation point (leaves room for
                # the truncation text).
                end_index = i
            if s_len > length:
                # Return the truncated string.
                return self.add_truncation_text(text[: end_index or 0], truncate)

        # Return the original string since no truncation was necessary.
        return text

    def words(self, num, truncate=None, html=False):
        """
        Truncate a string after a certain number of words. `truncate` specifies
        what should be used to notify that the string has been truncated,
        defaulting to ellipsis.
        """
        self._setup()
        length = int(num)
        if html:
            return self._truncate_html(length, truncate, self._wrapped, length, True)
        return self._text_words(length, truncate)

    def _text_words(self, length, truncate):
        """
        Truncate a string after a certain number of words.

        Strip newlines in the string (split()/join() collapses whitespace).
        """
        words = self._wrapped.split()
        if len(words) > length:
            words = words[:length]
            return self.add_truncation_text(" ".join(words), truncate)
        return " ".join(words)

    def _truncate_html(self, length, truncate, text, truncate_len, words):
        """
        Truncate HTML to a certain number of chars (not counting tags and
        comments), or, if words is True, then to a certain number of words.
        Close opened tags if they were correctly closed in the given HTML.

        Preserve newlines in the HTML.
        """
        if words and length <= 0:
            return ""

        # Cap the amount of HTML scanned to bound worst-case work.
        size_limited = False
        if len(text) > self.MAX_LENGTH_HTML:
            text = text[: self.MAX_LENGTH_HTML]
            size_limited = True

        # Void elements (HTML4 singlets): never pushed on the open-tag stack.
        html4_singlets = (
            "br",
            "col",
            "link",
            "base",
            "img",
            "param",
            "area",
            "hr",
            "input",
        )

        # Count non-HTML chars/words and keep note of open tags.
        pos = 0
        end_text_pos = 0
        current_len = 0
        open_tags = []

        regex = re_words if words else re_chars

        while current_len <= length:
            m = regex.search(text, pos)
            if not m:
                # Checked through whole string.
                break
            pos = m.end(0)
            if m[1]:
                # It's an actual non-HTML word or char.
                current_len += 1
                if current_len == truncate_len:
                    end_text_pos = pos
                continue
            # Check for tag.
            tag = re_tag.match(m[0])
            if not tag or current_len >= truncate_len:
                # Don't worry about non tags or tags after our truncate point.
                continue
            closing_tag, tagname, self_closing = tag.groups()
            # Element names are always case-insensitive.
            tagname = tagname.lower()
            if self_closing or tagname in html4_singlets:
                pass
            elif closing_tag:
                # Check for match in open tags list.
                try:
                    i = open_tags.index(tagname)
                except ValueError:
                    pass
                else:
                    # SGML: An end tag closes, back to the matching start tag,
                    # all unclosed intervening start tags with omitted end tags.
                    open_tags = open_tags[i + 1 :]
            else:
                # Add it to the start of the open tags list.
                open_tags.insert(0, tagname)

        truncate_text = self.add_truncation_text("", truncate)

        if current_len <= length:
            # No truncation needed -- but if the input was size-capped above,
            # signal that some of it was dropped.
            if size_limited and truncate_text:
                text += truncate_text
            return text
        out = text[:end_text_pos]
        if truncate_text:
            out += truncate_text
        # Close any tags still open.
        for tag in open_tags:
            out += "</%s>" % tag
        # Return string.
        return out
  257. @keep_lazy_text
  258. def get_valid_filename(name):
  259. """
  260. Return the given string converted to a string that can be used for a clean
  261. filename. Remove leading and trailing spaces; convert other spaces to
  262. underscores; and remove anything that is not an alphanumeric, dash,
  263. underscore, or dot.
  264. >>> get_valid_filename("john's portrait in 2004.jpg")
  265. 'johns_portrait_in_2004.jpg'
  266. """
  267. s = str(name).strip().replace(" ", "_")
  268. s = re.sub(r"(?u)[^-\w.]", "", s)
  269. if s in {"", ".", ".."}:
  270. raise SuspiciousFileOperation("Could not derive file name from '%s'" % name)
  271. return s
  272. @keep_lazy_text
  273. def get_text_list(list_, last_word=gettext_lazy("or")):
  274. """
  275. >>> get_text_list(['a', 'b', 'c', 'd'])
  276. 'a, b, c or d'
  277. >>> get_text_list(['a', 'b', 'c'], 'and')
  278. 'a, b and c'
  279. >>> get_text_list(['a', 'b'], 'and')
  280. 'a and b'
  281. >>> get_text_list(['a'])
  282. 'a'
  283. >>> get_text_list([])
  284. ''
  285. """
  286. if not list_:
  287. return ""
  288. if len(list_) == 1:
  289. return str(list_[0])
  290. return "%s %s %s" % (
  291. # Translators: This string is used as a separator between list elements
  292. _(", ").join(str(i) for i in list_[:-1]),
  293. str(last_word),
  294. str(list_[-1]),
  295. )
  296. @keep_lazy_text
  297. def normalize_newlines(text):
  298. """Normalize CRLF and CR newlines to just LF."""
  299. return re_newlines.sub("\n", str(text))
  300. @keep_lazy_text
  301. def phone2numeric(phone):
  302. """Convert a phone number with letters into its numeric equivalent."""
  303. char2number = {
  304. "a": "2",
  305. "b": "2",
  306. "c": "2",
  307. "d": "3",
  308. "e": "3",
  309. "f": "3",
  310. "g": "4",
  311. "h": "4",
  312. "i": "4",
  313. "j": "5",
  314. "k": "5",
  315. "l": "5",
  316. "m": "6",
  317. "n": "6",
  318. "o": "6",
  319. "p": "7",
  320. "q": "7",
  321. "r": "7",
  322. "s": "7",
  323. "t": "8",
  324. "u": "8",
  325. "v": "8",
  326. "w": "9",
  327. "x": "9",
  328. "y": "9",
  329. "z": "9",
  330. }
  331. return "".join(char2number.get(c, c) for c in phone.lower())
  332. def _get_random_filename(max_random_bytes):
  333. return b"a" * secrets.randbelow(max_random_bytes)
  334. def compress_string(s, *, max_random_bytes=None):
  335. compressed_data = gzip_compress(s, compresslevel=6, mtime=0)
  336. if not max_random_bytes:
  337. return compressed_data
  338. compressed_view = memoryview(compressed_data)
  339. header = bytearray(compressed_view[:10])
  340. header[3] = gzip.FNAME
  341. filename = _get_random_filename(max_random_bytes) + b"\x00"
  342. return bytes(header) + filename + compressed_view[10:]
  343. class StreamingBuffer(BytesIO):
  344. def read(self):
  345. ret = self.getvalue()
  346. self.seek(0)
  347. self.truncate()
  348. return ret
  349. # Like compress_string, but for iterators of strings.
  350. def compress_sequence(sequence, *, max_random_bytes=None):
  351. buf = StreamingBuffer()
  352. filename = _get_random_filename(max_random_bytes) if max_random_bytes else None
  353. with GzipFile(
  354. filename=filename, mode="wb", compresslevel=6, fileobj=buf, mtime=0
  355. ) as zfile:
  356. # Output headers...
  357. yield buf.read()
  358. for item in sequence:
  359. zfile.write(item)
  360. data = buf.read()
  361. if data:
  362. yield data
  363. yield buf.read()
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
smart_split_re = _lazy_re_compile(
    r"""
    ((?:
        [^\s'"]*
        (?:
            (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
            [^\s'"]*
        )+
    ) | \S+)
""",
    re.VERBOSE,
)
  378. def smart_split(text):
  379. r"""
  380. Generator that splits a string by spaces, leaving quoted phrases together.
  381. Supports both single and double quotes, and supports escaping quotes with
  382. backslashes. In the output, strings will keep their initial and trailing
  383. quote marks and escaped quotes will remain escaped (the results can then
  384. be further processed with unescape_string_literal()).
  385. >>> list(smart_split(r'This is "a person\'s" test.'))
  386. ['This', 'is', '"a person\\\'s"', 'test.']
  387. >>> list(smart_split(r"Another 'person\'s' test."))
  388. ['Another', "'person\\'s'", 'test.']
  389. >>> list(smart_split(r'A "\"funky\" style" test.'))
  390. ['A', '"\\"funky\\" style"', 'test.']
  391. """
  392. for bit in smart_split_re.finditer(str(text)):
  393. yield bit[0]
  394. @keep_lazy_text
  395. def unescape_string_literal(s):
  396. r"""
  397. Convert quoted string literals to unquoted strings with escaped quotes and
  398. backslashes unquoted::
  399. >>> unescape_string_literal('"abc"')
  400. 'abc'
  401. >>> unescape_string_literal("'abc'")
  402. 'abc'
  403. >>> unescape_string_literal('"a \"bc\""')
  404. 'a "bc"'
  405. >>> unescape_string_literal("'\'ab\' c'")
  406. "'ab' c"
  407. """
  408. if not s or s[0] not in "\"'" or s[-1] != s[0]:
  409. raise ValueError("Not a string literal: %r" % s)
  410. quote = s[0]
  411. return s[1:-1].replace(r"\%s" % quote, quote).replace(r"\\", "\\")
  412. @keep_lazy_text
  413. def slugify(value, allow_unicode=False):
  414. """
  415. Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
  416. dashes to single dashes. Remove characters that aren't alphanumerics,
  417. underscores, or hyphens. Convert to lowercase. Also strip leading and
  418. trailing whitespace, dashes, and underscores.
  419. """
  420. value = str(value)
  421. if allow_unicode:
  422. value = unicodedata.normalize("NFKC", value)
  423. else:
  424. value = (
  425. unicodedata.normalize("NFKD", value)
  426. .encode("ascii", "ignore")
  427. .decode("ascii")
  428. )
  429. value = re.sub(r"[^\w\s-]", "", value.lower())
  430. return re.sub(r"[-\s]+", "-", value).strip("-_")
  431. def camel_case_to_spaces(value):
  432. """
  433. Split CamelCase and convert to lowercase. Strip surrounding whitespace.
  434. """
  435. return re_camel_case.sub(r" \1", value).strip().lower()
  436. def _format_lazy(format_string, *args, **kwargs):
  437. """
  438. Apply str.format() on 'format_string' where format_string, args,
  439. and/or kwargs might be lazy.
  440. """
  441. return format_string.format(*args, **kwargs)
# Lazy counterpart of str.format(): the formatting is deferred until the
# result is coerced to str.
format_lazy = lazy(_format_lazy, str)