/*
 * Copyright (c) 2016-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#pragma once
#include <stdint.h>
#include <stddef.h>
#include <kern/assert.h>
#include <machine/limits.h>
#include "lz4_assembly_select.h"
#include "lz4_constants.h"

#if CONFIG_IO_COMPRESSION_STATS
#include <string.h>
#define lz4_memcpy memcpy
#else
#define lz4_memcpy __builtin_memcpy
#endif

#pragma mark - Building blocks

// Hash table entry: records the offset of a previous position in the source stream and the
// 4-byte word stored at that offset, used by the encoder to locate match candidates.
typedef struct { uint32_t offset; uint32_t word; } lz4_hash_entry_t;
static const size_t lz4_hash_table_size = LZ4_COMPRESS_HASH_ENTRIES * sizeof(lz4_hash_entry_t);

// Worker function for LZ4 encode. Underlies both the buffer and stream encode operations.
// Performs LZ4 encoding of up to 2 GB of data, and updates dst_ptr and src_ptr to point to the
// first byte of output and input that could not be completely processed, respectively.
//
// If skip_final_literals is 0, the entire src buffer is encoded: a final sequence of literals
// is emitted at the end of the compressed payload.
//
// If skip_final_literals is not 0, this final literal sequence is not emitted, and the src buffer
// is only partially encoded (the length of this trailing literal sequence varies).
extern void lz4_encode_2gb(uint8_t **dst_ptr, size_t dst_size,
    const uint8_t **src_ptr, const uint8_t *src_begin, size_t src_size,
    lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES], int skip_final_literals);

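// Illustrative sketch (an assumption about intended use, not a documented contract): a caller
// with more than 2 GB of input can drive lz4_encode_2gb one block at a time, requesting the
// trailing literal sequence only for the last block. Handling of input left unconsumed when
// dst fills up, or when the trailing literals are skipped, is omitted here.
//
//   const size_t BLOCK = 0x7ffff000;                    // stay below the 2 GB limit
//   while (src < src_end && dst < dst_end) {
//       size_t n = (size_t)(src_end - src);
//       int last_block = (n <= BLOCK);
//       if (!last_block) n = BLOCK;
//       const uint8_t *block_begin = src;               // base for match offsets in this block
//       lz4_encode_2gb(&dst, (size_t)(dst_end - dst), &src, block_begin, n,
//           hash_table, last_block ? 0 : 1);
//   }
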
extern int lz4_decode(uint8_t **dst_ptr, uint8_t *dst_begin, uint8_t *dst_end,
    const uint8_t **src_ptr, const uint8_t *src_end);

#if LZ4_ENABLE_ASSEMBLY_DECODE
extern int lz4_decode_asm(uint8_t **dst_ptr, uint8_t *dst_begin, uint8_t *dst_end,
    const uint8_t **src_ptr, const uint8_t *src_end);
#endif

#pragma mark - Buffer interfaces

static const size_t lz4_encode_scratch_size = lz4_hash_table_size;
static const size_t lz4_decode_scratch_size = 0;

#pragma mark - Buffer interfaces (LZ4 RAW)

size_t lz4raw_encode_buffer(uint8_t * __restrict dst_buffer, size_t dst_size,
    const uint8_t * __restrict src_buffer, size_t src_size,
    lz4_hash_entry_t hash_table[LZ4_COMPRESS_HASH_ENTRIES]);

size_t lz4raw_decode_buffer(uint8_t * __restrict dst_buffer, size_t dst_size,
    const uint8_t * __restrict src_buffer, size_t src_size,
    void * __restrict work __attribute__((unused)));
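
// Illustrative sketch of one-shot use of the raw-LZ4 buffer entry points, with hypothetical
// buffers src/dst/out; the return-value convention shown (byte count on success, 0 on failure)
// is an assumption, not a contract documented in this header. The encoder needs
// lz4_encode_scratch_size bytes of scratch for its hash table; the decoder needs none.
//
//   static lz4_hash_entry_t scratch[LZ4_COMPRESS_HASH_ENTRIES];  // lz4_encode_scratch_size bytes
//
//   size_t c_size = lz4raw_encode_buffer(dst, dst_capacity, src, src_size, scratch);
//   if (c_size == 0) { /* dst too small for the encoded payload */ }
//
//   size_t d_size = lz4raw_decode_buffer(out, out_capacity, dst, c_size, NULL);
//   if (d_size == 0) { /* corrupt or truncated payload */ }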

typedef __attribute__((__ext_vector_type__(8))) uint8_t vector_uchar8;
typedef __attribute__((__ext_vector_type__(16))) uint8_t vector_uchar16;
typedef __attribute__((__ext_vector_type__(32))) uint8_t vector_uchar32;
typedef __attribute__((__ext_vector_type__(64))) uint8_t vector_uchar64;
typedef __attribute__((__ext_vector_type__(16), __aligned__(1))) uint8_t packed_uchar16;
typedef __attribute__((__ext_vector_type__(32), __aligned__(1))) uint8_t packed_uchar32;
typedef __attribute__((__ext_vector_type__(64), __aligned__(1))) uint8_t packed_uchar64;

typedef __attribute__((__ext_vector_type__(4))) uint16_t vector_ushort4;
typedef __attribute__((__ext_vector_type__(4), __aligned__(2))) uint16_t packed_ushort4;

typedef __attribute__((__ext_vector_type__(2))) int32_t vector_int2;
typedef __attribute__((__ext_vector_type__(4))) int32_t vector_int4;
typedef __attribute__((__ext_vector_type__(8))) int32_t vector_int8;

typedef __attribute__((__ext_vector_type__(4))) uint32_t vector_uint4;

#define UTIL_FUNCTION static inline __attribute__((__always_inline__)) __attribute__((__overloadable__))
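// The vector_* types above have natural (full) alignment; the packed_* variants are declared
// with 1- or 2-byte alignment, so dereferencing a pointer cast to a packed_* type is a
// well-defined unaligned vector access, which is how load16/store16 and friends below work.
// Scalar accesses go through lz4_memcpy for the same reason; compilers typically lower both
// forms to ordinary unaligned load/store instructions on targets that support them.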

// Load N bytes from unaligned location PTR
UTIL_FUNCTION uint16_t
load2(const void * ptr)
{
    uint16_t data; lz4_memcpy(&data, ptr, sizeof data); return data;
}
UTIL_FUNCTION uint32_t
load4(const void * ptr)
{
    uint32_t data; lz4_memcpy(&data, ptr, sizeof data); return data;
}
UTIL_FUNCTION uint64_t
load8(const void * ptr)
{
    uint64_t data; lz4_memcpy(&data, ptr, sizeof data); return data;
}
UTIL_FUNCTION vector_uchar16
load16(const void * ptr)
{
    return (const vector_uchar16)*(const packed_uchar16 *)ptr;
}
UTIL_FUNCTION vector_uchar32
load32(const void * ptr)
{
    return (const vector_uchar32)*(const packed_uchar32 *)ptr;
}
UTIL_FUNCTION vector_uchar64
load64(const void * ptr)
{
    return (const vector_uchar64)*(const packed_uchar64 *)ptr;
}

// Store N bytes to unaligned location PTR
UTIL_FUNCTION void
store2(void * ptr, uint16_t data)
{
    lz4_memcpy(ptr, &data, sizeof data);
}
UTIL_FUNCTION void
store4(void * ptr, uint32_t data)
{
    lz4_memcpy(ptr, &data, sizeof data);
}
UTIL_FUNCTION void
store8(void * ptr, uint64_t data)
{
    lz4_memcpy(ptr, &data, sizeof data);
}
UTIL_FUNCTION void
store16(void * ptr, vector_uchar16 data)
{
    *(packed_uchar16 *)ptr = (packed_uchar16)data;
}
UTIL_FUNCTION void
store32(void * ptr, vector_uchar32 data)
{
    *(packed_uchar32 *)ptr = (packed_uchar32)data;
}
UTIL_FUNCTION void
store64(void * ptr, vector_uchar64 data)
{
    *(packed_uchar64 *)ptr = (packed_uchar64)data;
}

// Load+Store N bytes from unaligned locations SRC to DST. No overlap allowed.
UTIL_FUNCTION void
copy8(void * dst, const void * src)
{
    store8(dst, load8(src));
}
UTIL_FUNCTION void
copy16(void * dst, const void * src)
{
    *(packed_uchar16 *)dst = *(const packed_uchar16 *)src;
}
UTIL_FUNCTION void
copy32(void * dst, const void * src)
{
    *(packed_uchar32 *)dst = *(const packed_uchar32 *)src;
}
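
// Illustrative note on the no-overlap requirement above: LZ4 match copies may read bytes the
// decoder has only just written, so these wide copies are only safe when the match distance is
// at least the copy stride; shorter distances need a byte-by-byte copy. (The 8-byte stride
// below may also write up to 7 bytes past `length`, so callers keep a safety margin in dst.)
// The variable names here are hypothetical.
//
//   if (match_distance >= 8) {
//       for (size_t i = 0; i < length; i += 8) copy8(dst + i, dst + i - match_distance);
//   } else {
//       for (size_t i = 0; i < length; i++) dst[i] = dst[i - match_distance];
//   }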

#undef lz4_memcpy