• python运行hhsearch二进制命令的包装器类


    hhsearch 是 HMM-HMM(Hidden Markov Model to Hidden Markov Model)比对方法的一部分,属于 HH-suite 软件套件(注意:并非 HMMER 套件)。它用于进行蛋白质序列的高效比对,特别适用于检测远缘同源性。

    以下是 hhsearch 的一些主要特点和用途:

    1. HMM-HMM比对: hhsearch 使用隐藏马尔可夫模型(HMM)来表示蛋白质家族的模型。与传统的序列-序列比对方法不同,HMM-HMM比对考虑了氨基酸残基的多序列信息,使得在比对中能够更好地捕捉蛋白质家族的模式和结构。

    2. 检测远缘同源性: hhsearch 的一个主要优势是其能够检测到相对远离的同源关系。它在比对中引入了更多的信息,从而提高了对远缘同源蛋白的发现能力。

    3. 灵敏度和特异性: hhsearch 的设计旨在在维持高灵敏度的同时,减少假阳性的比对。这使得它在寻找结构和功能相似性时更为可靠。

    4. 数据库搜索: 用户可以使用 hhsearch 在大型蛋白质数据库中搜索与给定蛋白质序列相似的蛋白质。

    1. """Library to run HHsearch from Python."""
    2. import glob
    3. import os
    4. import subprocess
    5. from typing import Sequence, Optional, List, Iterable
    6. from absl import logging
    7. import contextlib
    8. import tempfile
    9. import dataclasses
    10. import contextlib
    11. import time
    12. import shutil
    13. import re
    14. @contextlib.contextmanager
    15. def timing(msg: str):
    16. logging.info('Started %s', msg)
    17. tic = time.time()
    18. yield
    19. toc = time.time()
    20. logging.info('Finished %s in %.3f seconds', msg, toc - tic)
@dataclasses.dataclass(frozen=True)
class TemplateHit:
    """Class representing a template hit."""
    # Rank of the hit in the .hhr results (taken from the 'No <n>' paragraph header).
    index: int
    # Identifier line of the hit template (the '>' line with the '>' stripped).
    name: str
    # Number of aligned columns reported in the hit's summary line.
    aligned_cols: int
    # Sum_probs value from the summary line; optional because not every tool emits it.
    sum_probs: Optional[float]
    # Query side of the alignment; may contain '-' gap characters.
    query: str
    # Template (hit) side of the alignment; may contain '-' gap characters.
    hit_sequence: str
    # Zero-based index into the original query sequence per alignment column; -1 marks a gap.
    indices_query: List[int]
    # Zero-based index into the template sequence per alignment column; -1 marks a gap.
    indices_hit: List[int]
    32. @contextlib.contextmanager
    33. def tmpdir_manager(base_dir: Optional[str] = None):
    34. """Context manager that deletes a temporary directory on exit."""
    35. tmpdir = tempfile.mkdtemp(dir=base_dir)
    36. try:
    37. yield tmpdir
    38. finally:
    39. shutil.rmtree(tmpdir, ignore_errors=True)
    40. def parse_hhr(hhr_string: str) -> Sequence[TemplateHit]:
    41. """Parses the content of an entire HHR file."""
    42. lines = hhr_string.splitlines()
    43. # Each .hhr file starts with a results table, then has a sequence of hit
    44. # "paragraphs", each paragraph starting with a line 'No '. We
    45. # iterate through each paragraph to parse each hit.
    46. block_starts = [i for i, line in enumerate(lines) if line.startswith('No ')]
    47. hits = []
    48. if block_starts:
    49. block_starts.append(len(lines)) # Add the end of the final block.
    50. for i in range(len(block_starts) - 1):
    51. hits.append(_parse_hhr_hit(lines[block_starts[i]:block_starts[i + 1]]))
    52. return hits
def _parse_hhr_hit(detailed_lines: Sequence[str]) -> TemplateHit:
    """Parses the detailed HMM HMM comparison section for a single Hit.

    This works on .hhr files generated from both HHBlits and HHSearch.

    Args:
        detailed_lines: A list of lines from a single comparison section between 2
            sequences (which each have their own HMM's)

    Returns:
        A TemplateHit with the information from that detailed comparison section

    Raises:
        RuntimeError: If a certain line cannot be processed
    """
    # Parse first 2 lines: 'No <n>' gives the hit rank, the '>' line gives the name.
    number_of_hit = int(detailed_lines[0].split()[-1])
    name_hit = detailed_lines[1][1:]

    # Parse the summary line (Probab=... E-value=... ... Template_Neff=...).
    pattern = (
        'Probab=(.*)[\t ]*E-value=(.*)[\t ]*Score=(.*)[\t ]*Aligned_cols=(.*)[\t'
        ' ]*Identities=(.*)%[\t ]*Similarity=(.*)[\t ]*Sum_probs=(.*)[\t '
        ']*Template_Neff=(.*)')
    match = re.match(pattern, detailed_lines[2])
    if match is None:
        raise RuntimeError(
            'Could not parse section: %s. Expected this: \n%s to contain summary.' %
            (detailed_lines, detailed_lines[2]))
    # Only aligned_cols and sum_probs are kept; the rest are parsed and discarded.
    (_, _, _, aligned_cols, _, _, sum_probs, _) = [float(x)
                                                   for x in match.groups()]

    # The next section reads the detailed comparisons. These are in a 'human
    # readable' format which has a fixed length. The strategy employed is to
    # assume that each block starts with the query sequence line, and to parse
    # that with a regexp in order to deduce the fixed length used for that block.
    query = ''
    hit_sequence = ''
    indices_query = []
    indices_hit = []
    length_block = None

    for line in detailed_lines[3:]:
        # Parse the query sequence line, skipping secondary-structure and
        # consensus annotation rows.
        if (line.startswith('Q ') and not line.startswith('Q ss_dssp') and
                not line.startswith('Q ss_pred') and
                not line.startswith('Q Consensus')):
            # Thus the first 17 characters must be 'Q <name+padding>', and we can
            # parse everything after that.
            #   start   sequence   end   total_sequence_length
            patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*([0-9]*) \([0-9]*\)'
            groups = _get_hhr_line_regex_groups(patt, line[17:])

            # Get the length of the parsed block using the start and finish indices,
            # and ensure it is the same as the actual block length.
            start = int(groups[0]) - 1  # Make index zero based.
            delta_query = groups[1]
            end = int(groups[2])
            # Gap characters consume an alignment column without advancing the
            # residue index, hence the correction term.
            num_insertions = len([x for x in delta_query if x == '-'])
            length_block = end - start + num_insertions
            assert length_block == len(delta_query)

            # Update the query sequence and indices list.
            query += delta_query
            _update_hhr_residue_indices_list(delta_query, start, indices_query)

        elif line.startswith('T '):
            # Parse the hit sequence, again skipping annotation rows.
            if (not line.startswith('T ss_dssp') and
                    not line.startswith('T ss_pred') and
                    not line.startswith('T Consensus')):
                # Thus the first 17 characters must be 'T <name+padding>', and we can
                # parse everything after that.
                #   start   sequence   end   total_sequence_length
                patt = r'[\t ]*([0-9]*) ([A-Z-]*)[\t ]*[0-9]* \([0-9]*\)'
                groups = _get_hhr_line_regex_groups(patt, line[17:])
                start = int(groups[0]) - 1  # Make index zero based.
                delta_hit_sequence = groups[1]
                # The template row must span the same block width deduced from the
                # query row that preceded it.
                assert length_block == len(delta_hit_sequence)

                # Update the hit sequence and indices list.
                hit_sequence += delta_hit_sequence
                _update_hhr_residue_indices_list(delta_hit_sequence, start, indices_hit)

    return TemplateHit(
        index=number_of_hit,
        name=name_hit,
        aligned_cols=int(aligned_cols),
        sum_probs=sum_probs,
        query=query,
        hit_sequence=hit_sequence,
        indices_query=indices_query,
        indices_hit=indices_hit,
    )
    135. def _get_hhr_line_regex_groups(
    136. regex_pattern: str, line: str) -> Sequence[Optional[str]]:
    137. match = re.match(regex_pattern, line)
    138. if match is None:
    139. raise RuntimeError(f'Could not parse query line {line}')
    140. return match.groups()
    141. def _update_hhr_residue_indices_list(
    142. sequence: str, start_index: int, indices_list: List[int]):
    143. """Computes the relative indices for each residue with respect to the original sequence."""
    144. counter = start_index
    145. for symbol in sequence:
    146. if symbol == '-':
    147. indices_list.append(-1)
    148. else:
    149. indices_list.append(counter)
    150. counter += 1
    151. class HHSearch:
    152. """Python wrapper of the HHsearch binary."""
    153. def __init__(self,
    154. *,
    155. binary_path: str,
    156. databases: Sequence[str],
    157. maxseq: int = 1_000_000):
    158. """Initializes the Python HHsearch wrapper.
    159. Args:
    160. binary_path: The path to the HHsearch executable.
    161. databases: A sequence of HHsearch database paths. This should be the
    162. common prefix for the database files (i.e. up to but not including
    163. _hhm.ffindex etc.)
    164. maxseq: The maximum number of rows in an input alignment. Note that this
    165. parameter is only supported in HHBlits version 3.1 and higher.
    166. Raises:
    167. RuntimeError: If HHsearch binary not found within the path.
    168. """
    169. self.binary_path = binary_path
    170. self.databases = databases
    171. self.maxseq = maxseq
    172. #for database_path in self.databases:
    173. # if not glob.glob(database_path + '_*'):
    174. # logging.error('Could not find HHsearch database %s', database_path)
    175. # raise ValueError(f'Could not find HHsearch database {database_path}')
    176. @property
    177. def output_format(self) -> str:
    178. return 'hhr'
    179. @property
    180. def input_format(self) -> str:
    181. return 'a3m'
    182. def query(self, a3m: str) -> str:
    183. """Queries the database using HHsearch using a given a3m."""
    184. with tmpdir_manager() as query_tmp_dir:
    185. input_path = os.path.join(query_tmp_dir, 'query.a3m')
    186. hhr_path = os.path.join(query_tmp_dir, 'output.hhr')
    187. with open(input_path, 'w') as f:
    188. f.write(a3m)
    189. db_cmd = []
    190. for db_path in self.databases:
    191. db_cmd.append('-d')
    192. db_cmd.append(db_path)
    193. cmd = [self.binary_path,
    194. '-i', input_path,
    195. '-o', hhr_path,
    196. '-maxseq', str(self.maxseq)
    197. ] + db_cmd
    198. print("cmd:",cmd)
    199. logging.info('Launching subprocess "%s"', ' '.join(cmd))
    200. process = subprocess.Popen(
    201. cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    202. with timing('HHsearch query'):
    203. stdout, stderr = process.communicate()
    204. retcode = process.wait()
    205. if retcode:
    206. # Stderr is truncated to prevent proto size errors in Beam.
    207. raise RuntimeError(
    208. 'HHSearch failed:\nstdout:\n%s\n\nstderr:\n%s\n' % (
    209. stdout.decode('utf-8'), stderr[:100_000].decode('utf-8')))
    210. with open(hhr_path) as f:
    211. hhr = f.read()
    212. return hhr
    213. def get_template_hits(self,
    214. output_string: str,
    215. input_sequence: str) -> Sequence[TemplateHit]:
    216. """Gets parsed template hits from the raw string output by the tool."""
    217. del input_sequence # Used by hmmseach but not needed for hhsearch.
    218. return parse_hhr(output_string)
    219. def convert_stockholm_to_a3m (stockholm_format: str,
    220. max_sequences: Optional[int] = None,
    221. remove_first_row_gaps: bool = True) -> str:
    222. """Converts MSA in Stockholm format to the A3M format."""
    223. descriptions = {}
    224. sequences = {}
    225. reached_max_sequences = False
    226. for line in stockholm_format.splitlines():
    227. reached_max_sequences = max_sequences and len(sequences) >= max_sequences
    228. if line.strip() and not line.startswith(('#', '//')):
    229. # Ignore blank lines, markup and end symbols - remainder are alignment
    230. # sequence parts.
    231. seqname, aligned_seq = line.split(maxsplit=1)
    232. if seqname not in sequences:
    233. if reached_max_sequences:
    234. continue
    235. sequences[seqname] = ''
    236. sequences[seqname] += aligned_seq
    237. for line in stockholm_format.splitlines():
    238. if line[:4] == '#=GS':
    239. # Description row - example format is:
    240. # #=GS UniRef90_Q9H5Z4/4-78 DE [subseq from] cDNA: FLJ22755 ...
    241. columns = line.split(maxsplit=3)
    242. seqname, feature = columns[1:3]
    243. value = columns[3] if len(columns) == 4 else ''
    244. if feature != 'DE':
    245. continue
    246. if reached_max_sequences and seqname not in sequences:
    247. continue
    248. descriptions[seqname] = value
    249. if len(descriptions) == len(sequences):
    250. break
    251. # Convert sto format to a3m line by line
    252. a3m_sequences = {}
    253. if remove_first_row_gaps:
    254. # query_sequence is assumed to be the first sequence
    255. query_sequence = next(iter(sequences.values()))
    256. query_non_gaps = [res != '-' for res in query_sequence]
    257. for seqname, sto_sequence in sequences.items():
    258. # Dots are optional in a3m format and are commonly removed.
    259. out_sequence = sto_sequence.replace('.', '')
    260. if remove_first_row_gaps:
    261. out_sequence = ''.join(
    262. _convert_sto_seq_to_a3m(query_non_gaps, out_sequence))
    263. a3m_sequences[seqname] = out_sequence
    264. fasta_chunks = (f">{k} {descriptions.get(k, '')}\n{a3m_sequences[k]}"
    265. for k in a3m_sequences)
    266. return '\n'.join(fasta_chunks) + '\n' # Include terminating newline
    267. def _convert_sto_seq_to_a3m(
    268. query_non_gaps: Sequence[bool], sto_seq: str) -> Iterable[str]:
    269. for is_query_res_non_gap, sequence_res in zip(query_non_gaps, sto_seq):
    270. if is_query_res_non_gap:
    271. yield sequence_res
    272. elif sequence_res != '-':
    273. yield sequence_res.lower()
if __name__ == "__main__":
    ### 1. Prepare the input data.
    ## The input sequence is first searched with several Jackhmmer iterations
    ## against the uniref90 and MGnify databases; the resulting multiple
    ## sequence alignment file (e.g. globins4.sto) is converted to A3M format
    ## and then searched with hhsearch against PDB-derived databases to find
    ## homologous template sequences.
    input_fasta_file = '/home/zheng/test/Q94K49.fasta'
    ## input_sequence
    with open(input_fasta_file) as f:
        input_sequence = f.read()

    test_templates_sto_file = "/home/zheng/test/Q94K49_aln.sto"
    with open(test_templates_sto_file) as f:
        test_templates_sto = f.read()

    ## Convert the Stockholm (.sto) alignment to A3M format.
    test_templates_a3m = convert_stockholm_to_a3m(test_templates_sto)

    hhsearch_binary_path = "/home/zheng/software/hhsuite-3.3.0-SSE2-Linux/bin/hhsearch"

    ### 2. Instantiate the wrapper class.
    # Filename prefix of the scop70_1.75 database files.
    scop70_database_path = "/home/zheng/database/scop70_1.75_hhsuite3/scop70_1.75"
    pdb70_database_path = "/home/zheng/database/pdb70_from_mmcif_latest/pdb70"
    # hhsuite database download location:
    # https://wwwuser.gwdg.de/~compbiol/data/hhsuite/databases/hhsuite_dbs/

    ## Single database:
    #template_searcher = HHSearch(binary_path = hhsearch_binary_path,
    #                             databases = [scop70_database_path])
    ## Multiple databases:
    database_lst = [scop70_database_path, pdb70_database_path]
    template_searcher = HHSearch(binary_path = hhsearch_binary_path,
                                 databases = database_lst)

    ### 3. Homologous-sequence search.
    ## The search returns the contents of an .hhr result file as a string.
    templates_result = template_searcher.query(test_templates_a3m)
    print(templates_result)

    ## List of parsed PDB template hit records.
    template_hits = template_searcher.get_template_hits(
        output_string=templates_result, input_sequence=input_sequence)
    print(template_hits)

  • 相关阅读:
    Deep Learning for Geophysics综述阅读(未完)
    【个人笔记js的原型理解】
    技术对接76
    转载 - 洞察问题本质,解决工作难题
    List的使用
    04UE4 C++ 入门【力和扭矩】
    机器学习笔记之隐马尔可夫模型(五)学习问题——EM算法
    【Ruby】怎样判断一个类是否有某个方法,一个实例是否有某个属性?
    激光视觉惯导融合的slam系统
    汇编反外挂
  • 原文地址:https://blog.csdn.net/qq_27390023/article/details/134526474