• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python typing.BinaryIO类代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中typing.BinaryIO的典型用法代码示例。如果您正苦于以下问题:Python BinaryIO类的具体用法?Python BinaryIO怎么用?Python BinaryIO使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。



在下文中一共展示了BinaryIO类的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: load

def load(file_handle: typing.BinaryIO) -> TSerializable:
    """load(file) -> object

    Read a single tnetstring from *file_handle* and parse it into a
    Python object.  The file only needs to support read(), and no more
    bytes are consumed than the encoded value requires.
    """
    # The length prefix is read byte-by-byte: the netstring spec bans
    # zero-padding, and we must not read past the end of the value.
    byte = file_handle.read(1)
    if byte == b"":  # distinguish "empty file" from "bad prefix"
        raise ValueError("not a tnetstring: empty file")
    prefix = b""
    while byte.isdigit():
        prefix += byte
        # A length longer than 9 digits cannot be a sane tnetstring.
        if len(prefix) > 9:
            raise ValueError("not a tnetstring: absurdly large length prefix")
        byte = file_handle.read(1)
    if byte != b":":
        raise ValueError("not a tnetstring: missing or invalid length prefix")

    payload = file_handle.read(int(prefix))
    type_tag = file_handle.read(1)[0]

    return parse(type_tag, payload)
开发者ID:StevenVanAcker,项目名称:mitmproxy,代码行数:25,代码来源:tnetstring.py


示例2: put

    def put(
        self, namespace: str, metadata: Dict[str, Any], bytes_io: BinaryIO,
    ) -> None:
        """Store a file."""
        # Keep only the metadata keys we persist on S3 and stringify them.
        stored = dict_subset(metadata, lambda k, v: k in (
            # We are not storing the 'file_name'
            'image_width', 'image_height', 'original_id', 'version'))
        self._convert_values_to_str(stored)

        # Rewind seekable streams so the upload starts from byte zero.
        if hasattr(bytes_io, 'seekable') and bytes_io.seekable():
            bytes_io.seek(0)

        # botocore.response.StreamingBody has no tell() method, which makes
        # bucket.put_object() fail with
        # "AttributeError: 'StreamingBody' object has no attribute 'tell'",
        # so such streams are drained into a bytes object first.
        if not hasattr(bytes_io, 'tell'):
            bytes_io = bytes_io.read()  # type: ignore

        return self.bucket.put_object(
            Key=self._get_path(namespace, metadata),
            # done automatically by botocore:  ContentMD5=encoded_md5,
            ContentType=metadata['mime_type'],
            ContentLength=metadata['length'], Body=bytes_io, Metadata=stored)
开发者ID:nandoflorestan,项目名称:keepluggable,代码行数:25,代码来源:amazon_s3.py


示例3: __init__

 def __init__(self, archive: BinaryIO, offset: int, length: int, prefix: bytes):
     archive.seek(offset)
     self.name = archive.name
     self.remaining = length
     self.sources = [cast(io.BufferedIOBase, archive)]
     if prefix:
         self.sources.insert(0, cast(io.BufferedIOBase, io.BytesIO(prefix)))
开发者ID:Lattyware,项目名称:unrpa,代码行数:7,代码来源:view.py


示例4: get_index

    def get_index(
        self, archive: BinaryIO, version: Optional[Version] = None
    ) -> Dict[str, ComplexIndexEntry]:
        """Read the archive's zlib-compressed, pickled file index.

        Returns a mapping of archive paths (using the host OS path
        separator) to index entries, de-obfuscated when the format
        requires a key.
        """
        # Fall back to the instance's version, then to auto-detection.
        if not version:
            version = self.version() if self.version else self.detect_version()

        offset = 0
        key: Optional[int] = None
        if self.offset_and_key:
            offset, key = self.offset_and_key
        else:
            offset, key = version.find_offset_and_key(archive)
        archive.seek(offset)
        # SECURITY NOTE(review): pickle.loads on archive-supplied data can
        # execute arbitrary code -- only open trusted archives.
        index: Dict[bytes, IndexEntry] = pickle.loads(
            zlib.decompress(archive.read()), encoding="bytes"
        )
        # A key means the index entries are obfuscated and must be decoded.
        if key is not None:
            normal_index = UnRPA.deobfuscate_index(key, index)
        else:
            normal_index = UnRPA.normalise_index(index)

        # Normalise path separators for the host OS.
        return {
            UnRPA.ensure_str_path(path).replace("/", os.sep): data
            for path, data in normal_index.items()
        }
开发者ID:Lattyware,项目名称:unrpa,代码行数:25,代码来源:__init__.py


示例5: save_to

 async def save_to(self, name: str, fd: BinaryIO):
     """Fetch the stored object called *name* and stream it into *fd*."""
     chunk_size = 2 << 20  # read in 2 MiB pieces
     async with ClientSession() as session:
         async with session.post(self.get_url, data=name.encode("utf8")) as response:
             assert response.status == 200
             while True:
                 chunk = await response.content.read(chunk_size)
                 if not chunk:
                     break
                 fd.write(chunk)
开发者ID:Mirantis,项目名称:ceph-monitoring,代码行数:9,代码来源:web_storage.py


示例6: read_offset_array

def read_offset_array(file: BinaryIO, count: int):
    """Read an array of offsets to null-terminated strings from the file."""
    offsets = str_read(str(count) + 'i', file)
    strings = []  # type: List[str]
    # Chase each offset and collect the string stored there.
    for offset in offsets:
        file.seek(offset)
        strings.append(read_nullstr(file))
    return strings
开发者ID:TeamSpen210,项目名称:srctools,代码行数:9,代码来源:mdl.py


示例7: download_into

def download_into(session: requests.Session,
                  url: str, file: BinaryIO, process_func=None) -> None:
  """Stream *url* into *file*, optionally reporting progress through
  process_func(received_bytes, total_bytes)."""
  response = session.get(url, stream=True)
  total = int(response.headers.get('Content-Length') or 0)
  done = 0
  for chunk in response.iter_content(CHUNK_SIZE):
    done += len(chunk)
    file.write(chunk)
    if process_func:
      process_func(done, total)
  # No Content-Length header: report completion using the final size.
  if not total and process_func:
    process_func(done, done)
开发者ID:archlinuxcn,项目名称:lilac,代码行数:12,代码来源:requestsutils.py


示例8: postprocess

 def postprocess(self, source: ArchiveView, sink: BinaryIO) -> None:
     """De-obfuscate the leading section of *source*, then copy the
     remainder into *sink* verbatim."""
     if not self.details:
         raise Exception("find_offset_and_key must be called before postprocess")
     key, amount = self.details
     # Keep reading until the full obfuscated prefix has been collected;
     # a single read() may return fewer bytes than requested.
     pieces = []
     remaining = amount
     while remaining > 0:
         piece = source.read(remaining)
         remaining -= len(piece)
         pieces.append(piece)
     sink.write(obfuscation_run(b"".join(pieces), key))
     # Everything after the obfuscated prefix passes through unchanged.
     for segment in iter(source.read1, b""):
         sink.write(segment)
开发者ID:Lattyware,项目名称:unrpa,代码行数:13,代码来源:zix.py


示例9: read_nullstr

def read_nullstr(file: BinaryIO, pos: int=None):
    """Read a null-terminated string from the file.

    If *pos* is given, seek there first; a *pos* of zero is treated as
    "no string" and returns '' without touching the file.
    """
    if pos is not None:
        if pos == 0:
            return ''
        file.seek(pos)

    buf = bytearray()
    while True:
        byte = file.read(1)
        if byte == b'\0':
            # Terminator reached -- decode everything collected so far.
            return buf.decode('ascii')
        if not byte:
            raise ValueError('Fell off end of file!')
        buf += byte
开发者ID:TeamSpen210,项目名称:srctools,代码行数:15,代码来源:mdl.py


示例10: add_member_stream

 def add_member_stream( self,
     path: PurePosixPath, mtime: ArchiveMTime,
     content_stream: BinaryIO,
 ) -> None:
     """Drain *content_stream* and add its contents as an archive member."""
     data = content_stream.read()
     # Guard against text-mode streams sneaking in.
     assert isinstance(data, bytes), type(data)
     return self.add_member_bytes(path, mtime, data)
开发者ID:tifv,项目名称:jeolm,代码行数:7,代码来源:archive.py


示例11: read_delimited_chunks

def read_delimited_chunks(infile: BinaryIO, chunk_size: int) -> Generator[bytes, None, None]:
    """Yield the contents of infile in chunk_size pieces ending at newlines.
    All pieces except possibly the last end in a newline and are no
    larger than chunk_size where the data allows it.

    Params:
        infile: stream to read from
        chunk_size: maximum size of each chunk

    Yields:
        chunk: chunk with maximum size of chunk_size if possible
    """
    carry = b""

    while True:
        data = infile.read(chunk_size)
        pieces = split_chunks(carry + data, chunk_size)
        # The final piece may stop mid-line (and may be tiny), so hold it
        # back and combine it with the next read instead of yielding it.
        carry = pieces.pop() if pieces else b""
        for piece in pieces:
            yield piece

        if not data:
            # EOF: flush whatever is still held back.
            if carry:
                yield carry
            break
开发者ID:razuz,项目名称:intelmq,代码行数:31,代码来源:splitreports.py


示例12: generate_reports

def generate_reports(report_template: Report, infile: BinaryIO, chunk_size: Optional[int],
                     copy_header_line: bool) -> Generator[Report, None, None]:
    """Yield copies of *report_template* whose raw data comes from *infile*.

    With chunk_size=None a single report carries the entire file
    contents. Otherwise chunk_size is the maximum number of bytes per
    chunk and the input is cut at newline boundaries (see
    read_delimited_chunks), yielding one report per chunk. When
    copy_header_line is true, the file's first line is read up front and
    prepended to every chunk -- particularly useful for CSV input.

    infile only needs two methods, read and readline, with readline
    called at most once (when copy_header_line is true). Both must
    return bytes objects.

    Params:
        report_template: report used as template for all yielded copies
        infile: stream to read from
        chunk_size: maximum size of each chunk
        copy_header_line: copy the first line of the infile to each chunk

    Yields:
        report: a Report object holding the chunk in the raw field
    """
    if chunk_size is None:
        # Unsplit mode: one report with the whole file; nothing is
        # yielded for an empty file.
        report = report_template.copy()
        data = infile.read()
        if data:
            report.add("raw", data, overwrite=True)
            yield report
        return

    header = infile.readline() if copy_header_line else b""
    for chunk in read_delimited_chunks(infile, chunk_size):
        report = report_template.copy()
        report.add("raw", header + chunk, overwrite=True)
        yield report
开发者ID:certtools,项目名称:intelmq,代码行数:45,代码来源:splitreports.py


示例13: put

 def put(
     self, namespace: str, metadata: Dict[str, Any], bytes_io: BinaryIO,
 ) -> None:
     """Store a file (``bytes_io``) inside ``namespace``."""
     # Rewind unless the stream is already at the start.
     if bytes_io.tell():
         bytes_io.seek(0)
     directory = self._dir_of(namespace)
     if not directory.exists():
         directory.mkdir(parents=True)  # Create namespace directory as needed
     target = directory / self._get_filename(metadata)
     with open(str(target), mode='wb', buffering=MEGABYTE) as writer:
         # Copy in MEGABYTE-sized pieces to bound memory use.
         while True:
             piece = bytes_io.read(MEGABYTE)
             if not piece:
                 break
             writer.write(piece)
     # Sanity check: bytes on disk must match the declared length.
     assert target.lstat().st_size == metadata['length']
开发者ID:nandoflorestan,项目名称:keepluggable,代码行数:18,代码来源:local.py


示例14: _compute_md5

 def _compute_md5(
     self, bytes_io: BinaryIO, metadata: Dict[str, Any],
 ) -> None:
     """Compute the MD5 and total length of *bytes_io* in 2 MiB pieces.

     Stores the hex digest under metadata['md5'].  If metadata has no
     'length' yet it is filled in; otherwise the measured length must
     match the declared one.  The stream is rewound when done.
     """
     from hashlib import md5
     chunk_size = 2 * 1048576
     digest = md5()
     total = 0
     bytes_io.seek(0)
     while True:
         chunk = bytes_io.read(chunk_size)
         if chunk == b'':
             break
         total += len(chunk)
         digest.update(chunk)
     metadata['md5'] = digest.hexdigest()
     previous_length = metadata.get('length')
     if previous_length is None:
         metadata['length'] = total
     else:
         assert previous_length == total, "Bug? File lengths {}, {} " \
             "don't match.".format(previous_length, total)
     bytes_io.seek(0)  # ...so it can be read again
开发者ID:nandoflorestan,项目名称:keepluggable,代码行数:22,代码来源:actions.py


示例15: mktar_from_dockerfile

def mktar_from_dockerfile(fileobject: BinaryIO) -> IO:
    """
    Create a gzip-compressed tar archive containing a Dockerfile.

    **Remember to close the returned file object.**

    Args:
        fileobject: a binary file-like object with the Dockerfile contents

    Returns:
        a NamedTemporaryFile() object, rewound to position 0
    """
    f = tempfile.NamedTemporaryFile()
    # Use a context manager so the tar stream is always finalized/closed,
    # even if building the archive raises (the original leaked it then).
    with tarfile.open(mode="w:gz", fileobj=f) as t:
        if isinstance(fileobject, BytesIO):
            # In-memory buffers carry no OS stat info, so the TarInfo
            # header must be built by hand.
            dfinfo = tarfile.TarInfo("Dockerfile")
            dfinfo.size = len(fileobject.getvalue())
            fileobject.seek(0)
        else:
            dfinfo = t.gettarinfo(fileobj=fileobject, arcname="Dockerfile")

        t.addfile(dfinfo, fileobject)

    # Rewind so callers can immediately read the finished archive.
    f.seek(0)
    return f
开发者ID:paultag,项目名称:aiodocker,代码行数:24,代码来源:utils.py


示例16: parse_header

def parse_header(source: BinaryIO) -> Tuple[OFXHeaderType, str]:
    """
    Consume source; feed to appropriate class constructor which performs
    validation/type conversion on OFX header.

    Using header, locate/read/decode (but do not parse) OFX data body.

    Returns a 2-tuple of:
        * instance of OFXHeaderV1/OFXHeaderV2 containing parsed data, and
        * decoded text of OFX data body
    """
    # Skip any leading blank lines.  The OFX header is machine-written
    # ASCII -- no emoji, no CJK, nothing exotic.
    while True:
        first_line = source.readline().decode("ascii")
        if first_line.strip():
            break

    # An XML declaration on the first non-empty line means OFX v2.
    if XML_REGEX.match(first_line):
        # The OFXv2 spec allows the XML declaration, OFX declaration and
        # data elements to share a line, so `first_line` may hold any mix
        # of them.  Simplest fix: rewind, read the whole file (UTF-8 per
        # the spec) and slice the body off after the OFX declaration.
        source.seek(0)
        decoded = source.read().decode(OFXHeaderV2.codec)
        header, body_start = OFXHeaderV2.parse(decoded)
        body = decoded[body_start:]
    else:
        # OFX v1: the header is exactly 9 "KEY:VALUE" lines, so read the
        # 8 lines that follow the one we already have.
        raw_lines = [first_line + "\n"]
        for _ in range(8):
            raw_lines.append(source.readline().decode("ascii"))
        header, body_start = OFXHeaderV1.parse("".join(raw_lines))

        # The stream is now positioned at the start of the OFX body tag
        # soup; decode the rest using the codec the header declared.
        body = source.read().decode(header.codec)

    return header, body.strip()
开发者ID:csingley,项目名称:ofxtools,代码行数:51,代码来源:header.py


示例17: iter_nullstr

def iter_nullstr(file: BinaryIO):
    """Yield null-terminated ASCII strings read from the file.

    Strings are produced one per null terminator.  A stored value of
    ' ' represents a blank string, while a genuinely empty string
    indicates the end of the section and stops iteration.
    """
    pending = bytearray()
    while True:
        byte = file.read(1)
        if byte == b'\x00':
            text = pending.decode('ascii')
            pending.clear()

            if text == ' ':  # Blank strings are saved as ' '
                yield ''
            elif not text:
                return  # Actual blanks end the array.
            else:
                yield text
        elif not byte:
            # Hit EOF while a string was still being accumulated.
            raise Exception('Reached EOF without null-terminator in {}!'.format(bytes(pending)))
        else:
            pending += byte
开发者ID:TeamSpen210,项目名称:srctools,代码行数:23,代码来源:vpk.py


示例18: _rewrite_ownership_v0

def _rewrite_ownership_v0(
    input_file: BinaryIO, new_file: BinaryIO, header: MdvHeader, uid: int, gid: int
) -> None:
    """Copy the inode metadata table from input_file to new_file,
    rewriting every entry's owner to the given uid/gid.

    Raises if the table holds fewer entries than header.entry_count
    claims.  (The unused `entries_processed` counter from the original
    has been dropped.)
    """
    entry_size = InodeMetadataV0.FORMAT.size
    for _ in range(header.entry_count):
        entry_data = input_file.read(entry_size)
        if len(entry_data) != entry_size:
            raise Exception("inode metadata table appears truncated")

        entry = InodeMetadataV0.parse(entry_data)
        entry.uid = uid
        entry.gid = gid
        new_file.write(entry.serialize())

    # Copy the remaining file contents as is.  This is normally all 0-filled data
    # that provides space for new entries to be written in the future.
    padding = input_file.read()
    new_file.write(padding)
开发者ID:facebookexperimental,项目名称:eden,代码行数:21,代码来源:inode_metadata.py


示例19: _read_sequences

    def _read_sequences(f: BinaryIO, off, count) -> List[MDLSequence]:
        """Split this off to decrease stack in main parse method.

        Parses *count* fixed-size sequence records starting at file
        offset *off*, including each sequence's animation events and
        keyvalue text, and returns them as MDLSequence objects.
        """
        f.seek(off)
        sequences = [None] * count  # type: List[MDLSequence]
        for i in range(count):
            # All pointers inside a record are relative to the record's
            # own start position, so remember where it begins.
            start_pos = f.tell()
            (
                base_ptr,
                label_pos,
                act_name_pos,
                flags,
                _,  # Seems to be a pointer.
                act_weight,
                event_count,
                event_pos,
            ) = str_read('8i', f)
            bbox_min = str_readvec(f)
            bbox_max = str_readvec(f)

            # Skip 20 ints, 9 floats to get to keyvalues = 29*4 bytes
            # Then 8 unused ints.
            (
                keyvalue_pos,
                keyvalue_size,
            ) = str_read('116xii32x', f)
            # Remember where this record ends so we can jump to the next
            # one after chasing the event/keyvalue pointers.
            end_pos = f.tell()

            f.seek(start_pos + event_pos)
            events = [None] * event_count  # type: List[SeqEvent]
            for j in range(event_count):
                event_start = f.tell()
                (
                    event_cycle,
                    event_index,
                    event_flags,
                    event_options,
                    event_nameloc,
                ) = str_read('fii64si', f)
                event_end = f.tell()

                # There are two event systems.
                if event_flags == 1 << 10:
                    # New system, name in the file.
                    event_name = read_nullstr(f, event_start + event_nameloc)
                    # A purely numeric "name" is still an index lookup.
                    if event_name.isdigit():
                        try:
                            event_type = ANIM_EVENT_BY_INDEX[int(event_name)]
                        except KeyError:
                            raise ValueError('Unknown event index!')
                    else:
                        try:
                            event_type = ANIM_EVENT_BY_NAME[event_name]
                        except KeyError:
                            # NPC-specific events, declared dynamically.
                            event_type = event_name
                else:
                    # Old system, index.
                    try:
                        event_type = ANIM_EVENT_BY_INDEX[event_index]
                    except KeyError:
                        # Deliberately tolerant: log and skip rather than
                        # abort the whole parse on an unknown index.
                        # raise ValueError('Unknown event index!')
                        print('Unknown: ', event_index, event_options.rstrip(b'\0'))
                        continue

                # read_nullstr may have moved the cursor; restore it to
                # just past this event record before reading the next.
                f.seek(event_end)
                events[j] = SeqEvent(
                    type=event_type,
                    cycle=event_cycle,
                    options=event_options.rstrip(b'\0').decode('ascii')
                )

            if keyvalue_size:
                keyvalues = read_nullstr(f, start_pos + keyvalue_pos)
            else:
                keyvalues = ''

            sequences[i] = MDLSequence(
                label=read_nullstr(f, start_pos + label_pos),
                act_name=read_nullstr(f, start_pos + act_name_pos),
                flags=flags,
                act_weight=act_weight,
                events=events,
                bbox_min=bbox_min,
                bbox_max=bbox_max,
                keyvalues=keyvalues,
            )

            # Jump back to the end of this record for the next iteration.
            f.seek(end_pos)

        return sequences
开发者ID:TeamSpen210,项目名称:srctools,代码行数:90,代码来源:mdl.py


示例20: _load

    def _load(self, f: BinaryIO):
        """Read data from the MDL file."""
        assert f.tell() == 0, "Doesn't begin at start?"
        if f.read(4) != b'IDST':
            raise ValueError('Not a model!')
        (
            self.version,
            name,
            file_len,
            # 4 bytes are unknown...
        ) = str_read('i 4x 64s i', f)

        if not 44 <= self.version <= 49:
            raise ValueError('Unknown MDL version {}!'.format(self.version))

        self.name = name.rstrip(b'\0').decode('ascii')
        self.eye_pos = str_readvec(f)
        self.illum_pos = str_readvec(f)
        # Approx dimensions
        self.hull_min = str_readvec(f)
        self.hull_max = str_readvec(f)
        
        self.view_min = str_readvec(f)
        self.view_max = str_readvec(f)

        # Break up the reading a bit to limit the stack size.
        (
            flags,

            bone_count,
            bone_off,

            bone_controller_count, bone_controller_off,

            hitbox_count, hitbox_off,
            anim_count, anim_off,
            sequence_count, sequence_off,
        ) = str_read('11I', f)

        self.flags = Flags(flags)

        (
            activitylistversion, eventsindexed,

            texture_count, texture_offset,
            cdmat_count, cdmat_offset,
            
            skinref_count, skinref_ind, skinfamily_count,
            
            bodypart_count, bodypart_offset,
            attachment_count, attachment_offset,
        ) = str_read('13i', f)

        (
            localnode_count,
            localnode_index,
            localnode_name_index,
         
            # mstudioflexdesc_t
            flexdesc_count,
            flexdesc_index,
         
            # mstudioflexcontroller_t
            flexcontroller_count,
            flexcontroller_index,
         
            # mstudioflexrule_t
            flexrules_count,
            flexrules_index,
         
            # IK probably refers to inverse kinematics
            # mstudioikchain_t
            ikchain_count,
            ikchain_index,
         
            # Information about any "mouth" on the model for speech animation
            # More than one sounds pretty creepy.
            # mstudiomouth_t
            mouths_count, 
            mouths_index,
         
            # mstudioposeparamdesc_t
            localposeparam_count,
            localposeparam_index,
        ) = str_read('15I', f)

        # VDC:
        # For anyone trying to follow along, as of this writing,
        # the next "surfaceprop_index" value is at position 0x0134 (308)
        # from the start of the file.
        assert f.tell() == 308, 'Offset wrong? {} != 308 {}'.format(f.tell(), f)

        (
            # Surface property value (single null-terminated string)
            surfaceprop_index,
         
            # Unusual: In this one index comes first, then count.
            # Key-value data is a series of strings. If you can't find
            # what you're interested in, check the associated PHY file as well.
            keyvalue_index,
#.........这里部分代码省略.........
开发者ID:TeamSpen210,项目名称:srctools,代码行数:101,代码来源:mdl.py



注:本文中的typing.BinaryIO类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python typing.IO类代码示例发布时间:2022-05-27
下一篇:
Python typing.typevar函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap