#pragma once

#include "../rsx_cache.h"
#include "texture_cache_predictor.h"
#include "TextureUtils.h"

#include <list>
#include <unordered_set>
#include <atomic>

namespace rsx
{
	/**
	 * Helper enums/structs
	 */
	enum invalidation_chain_policy
	{
		invalidation_chain_none,   // No chaining: only sections that overlap the faulting page get invalidated
		invalidation_chain_full,   // Full chaining: sections overlapping the faulting page get invalidated, as well as any sections overlapping invalidated sections
		invalidation_chain_nearby  // Invalidations chain if they are near the fault (<X pages away)
	};

	enum invalidation_chain_direction
	{
		chain_direction_both,
		chain_direction_forward,  // Only higher-base-address sections chain (unless they overlap the fault)
		chain_direction_backward, // Only lower-base-address sections chain (unless they overlap the fault)
	};
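
	// Illustrative example of the chaining policies (hypothetical page-aligned sections):
	// given sections A=[0,4), B=[3,8) and C=[7,12) (in pages) and a fault on page 3,
	// - invalidation_chain_none invalidates A and B (they overlap the faulting page) but keeps C;
	// - invalidation_chain_full also invalidates C, since C overlaps the invalidated section B;
	// - invalidation_chain_nearby invalidates C only if it is within the chaining distance of the fault.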

	enum texture_create_flags
	{
		default_component_order = 0,
		native_component_order = 1,
		swapped_native_component_order = 2,
	};

	enum memory_read_flags
	{
		flush_always = 0,
		flush_once = 1
	};

	struct invalidation_cause
	{
		enum enum_type
		{
			invalid = 0,
			read,
			deferred_read,
			write,
			deferred_write,
			unmap,             // fault range is being unmapped
			reprotect,         // we are going to reprotect the fault range
			superseded_by_fbo, // used by texture_cache::locked_memory_region
			committed_as_fbo   // same as superseded_by_fbo but without locking or preserving page flags
		} cause;

		constexpr bool valid() const
		{
			return cause != invalid;
		}

		constexpr bool is_read() const
		{
			AUDIT(valid());
			return (cause == read || cause == deferred_read);
		}

		constexpr bool deferred_flush() const
		{
			AUDIT(valid());
			return (cause == deferred_read || cause == deferred_write);
		}

		constexpr bool destroy_fault_range() const
		{
			AUDIT(valid());
			return (cause == unmap);
		}

		constexpr bool keep_fault_range_protection() const
		{
			AUDIT(valid());
			return (cause == unmap || cause == reprotect || cause == superseded_by_fbo);
		}

		constexpr bool skip_fbos() const
		{
			AUDIT(valid());
			return (cause == superseded_by_fbo || cause == committed_as_fbo);
		}

		constexpr bool skip_flush() const
		{
			AUDIT(valid());
			return (cause == unmap) || (!g_cfg.video.strict_texture_flushing && cause == superseded_by_fbo);
		}

		constexpr invalidation_cause undefer() const
		{
			AUDIT(deferred_flush());
			if (cause == deferred_read)
				return read;
			else if (cause == deferred_write)
				return write;
			else
				fmt::throw_exception("Unreachable " HERE);
		}

		constexpr invalidation_cause defer() const
		{
			AUDIT(!deferred_flush());
			if (cause == read)
				return deferred_read;
			else if (cause == write)
				return deferred_write;
			else
				fmt::throw_exception("Unreachable " HERE);
		}

		constexpr invalidation_cause() : cause(invalid) {}
		constexpr invalidation_cause(enum_type _cause) : cause(_cause) {}

		operator enum_type&() { return cause; }
		constexpr operator enum_type() const { return cause; }
	};
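
	// Example (illustrative): deferring a write invalidation and resolving it later.
	//   invalidation_cause cause = invalidation_cause::write;
	//   invalidation_cause deferred = cause.defer();      // becomes deferred_write
	//   ...                                               // handle the fault asynchronously
	//   invalidation_cause resolved = deferred.undefer(); // back to write once the flush happens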

	/**
	 * List structure used in Ranged Storage Blocks
	 * List of Arrays
	 * (avoids reallocation without the significant disadvantages of slow iteration through a list)
	 */
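	// Sketch of the intended usage (element type is hypothetical, for illustration only):
	//   ranged_storage_block_list<int, 64> list;
	//   int& v = list.emplace_back(42); // appends in place; existing element references stay valid
	//   for (int& x : list) { ... }     // forward iteration walks each chained array in order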
	template <typename section_storage_type, size_t array_size>
	class ranged_storage_block_list
	{
		static_assert(array_size > 0, "array_elements must be positive non-zero");

	public:
		using value_type = section_storage_type;
		using array_type = typename std::array<value_type, array_size>;
		using list_type = typename std::list<array_type>;
		using size_type = u32;

		// Iterator
		template <typename T, typename block_list, typename list_iterator>
		class iterator_tmpl
		{
		public:
			// Traits
			using value_type = T;
			using pointer = T*;
			using difference_type = int;
			using reference = T&;
			using iterator_category = std::forward_iterator_tag;

			// Constructors
			iterator_tmpl() = default;
			iterator_tmpl(block_list* _block) :
				block(_block),
				list_it(_block->m_data.begin()),
				idx(0)
			{
				if (_block->empty())
					idx = UINT32_MAX;
			}

		private:
			// Members
			block_list* block;
			list_iterator list_it = {};
			size_type idx = UINT32_MAX;
			size_type array_idx = 0;

			inline void next()
			{
				++idx;
				if (idx >= block->size())
				{
					idx = UINT32_MAX;
					return;
				}

				++array_idx;
				if (array_idx >= array_size)
				{
					array_idx = 0;
					list_it++;
				}
			}

		public:
			inline reference operator*() const { return (*list_it)[array_idx]; }
			inline pointer operator->() const { return &((*list_it)[array_idx]); }
			inline reference operator++() { next(); return **this; }
			inline reference operator++(int) { auto& res = **this; next(); return res; }
			inline bool operator==(const iterator_tmpl& rhs) const { return idx == rhs.idx; }
			inline bool operator!=(const iterator_tmpl& rhs) const { return idx != rhs.idx; }
		};

		using iterator = iterator_tmpl<value_type, ranged_storage_block_list, typename list_type::iterator>;
		using const_iterator = iterator_tmpl<const value_type, const ranged_storage_block_list, typename list_type::const_iterator>;

		// Members
		size_type m_size = 0;
		list_type m_data;
		typename list_type::iterator m_data_it;
		size_type m_array_idx;
		size_type m_capacity;

		// Helpers
		inline void next_array()
		{
			if (m_data_it == m_data.end() || ++m_data_it == m_data.end())
			{
				m_data_it = m_data.emplace(m_data_it);
				m_capacity += array_size;
			}

			m_array_idx = 0;
		}

	public:
		// Constructor, Destructor
		ranged_storage_block_list() :
			m_data_it(m_data.end()),
			m_array_idx(UINT32_MAX)
		{}

		// Iterator
		inline iterator begin() noexcept { return { this }; }
		inline const_iterator begin() const noexcept { return { this }; }
		constexpr iterator end() noexcept { return {}; }
		constexpr const_iterator end() const noexcept { return {}; }

		// Operators
		inline value_type& front()
		{
			AUDIT(!empty());
			return m_data.front()[0];
		}

		inline value_type& back()
		{
			// emplace_back() post-increments m_array_idx, so it indexes the next free slot;
			// the most recently appended element therefore lives at m_array_idx - 1
			AUDIT(m_data_it != m_data.end() && m_array_idx > 0 && m_array_idx <= array_size);
			return (*m_data_it)[m_array_idx - 1];
		}

		// Other operations on data
		inline size_type size() const { return m_size; }
		inline size_type capacity() const { return m_capacity; }
		inline bool empty() const { return m_size == 0; }

		inline void clear()
		{
			m_size = 0;
			m_array_idx = 0;
			m_data_it = m_data.begin();
		}

		inline void free()
		{
			m_size = 0;
			m_array_idx = 0;
			m_capacity = 0;

			m_data.clear();
			m_data_it = m_data.end();
		}

		inline void reserve(size_type new_size)
		{
			if (new_size <= m_capacity) return;

			size_type new_num_arrays = ((new_size - 1) / array_size) + 1;

			// std::list provides no reserve(); pre-allocate whole arrays instead.
			// next_array() will then walk into these arrays as elements are appended.
			const bool was_empty = m_data.empty();
			while (m_data.size() < new_num_arrays)
			{
				m_data.emplace_back();
			}

			if (was_empty)
			{
				// Point the append cursor at the first pre-allocated array
				m_data_it = m_data.begin();
				m_array_idx = 0;
			}

			m_capacity = new_num_arrays * array_size;
		}

		template <typename... Args>
		inline value_type& emplace_back(Args&&... args)
		{
			if (m_array_idx >= array_size)
			{
				next_array();
			}

			ASSERT(m_capacity > 0 && m_array_idx < array_size && m_data_it != m_data.end());

			value_type* dest = &((*m_data_it)[m_array_idx++]);
			new (dest) value_type(std::forward<Args>(args)...);
			++m_size;

			return *dest;
		}
	};

	/**
	 * Ranged storage
	 */
	template <typename _ranged_storage_type>
	class ranged_storage_block
	{
	public:
		using ranged_storage_type = _ranged_storage_type;
		using section_storage_type = typename ranged_storage_type::section_storage_type;
		using texture_cache_type = typename ranged_storage_type::texture_cache_type;

		//using block_container_type = std::list<section_storage_type>;
		using block_container_type = ranged_storage_block_list<section_storage_type, 64>;
		using iterator = typename block_container_type::iterator;
		using const_iterator = typename block_container_type::const_iterator;
		using size_type = typename block_container_type::size_type;

		static constexpr u32 num_blocks = ranged_storage_type::num_blocks;
		static constexpr u32 block_size = ranged_storage_type::block_size;

		using unowned_container_type = std::unordered_set<section_storage_type*>;
		using unowned_iterator = typename unowned_container_type::iterator;
		using unowned_const_iterator = typename unowned_container_type::const_iterator;

	private:
		u32 index = 0;
		address_range range = {};
		block_container_type sections = {};
		unowned_container_type unowned; // pointers to sections from other blocks that overlap this block
		std::atomic<u32> exists_count = 0;
		std::atomic<u32> locked_count = 0;
		std::atomic<u32> unreleased_count = 0;
		ranged_storage_type* m_storage = nullptr;

		inline void add_owned_section_overlaps(section_storage_type& section)
		{
			u32 end = section.get_section_range().end;
			for (auto* block = next_block(); block != nullptr && end >= block->get_start(); block = block->next_block())
			{
				block->add_unowned_section(section);
			}
		}

		inline void remove_owned_section_overlaps(section_storage_type& section)
		{
			u32 end = section.get_section_range().end;
			for (auto* block = next_block(); block != nullptr && end >= block->get_start(); block = block->next_block())
			{
				block->remove_unowned_section(section);
			}
		}

	public:
		// Construction
		ranged_storage_block() = default;

		void initialize(u32 _index, ranged_storage_type* storage)
		{
			verify(HERE), m_storage == nullptr && storage != nullptr;
			AUDIT(index < num_blocks);

			m_storage = storage;
			index = _index;
			range = address_range::start_length(index * block_size, block_size);

			AUDIT(range.is_page_range() && get_start() / block_size == index);
		}

		/**
		 * Wrappers
		 */
		constexpr iterator begin() noexcept { return sections.begin(); }
		constexpr const_iterator begin() const noexcept { return sections.begin(); }
		inline iterator end() noexcept { return sections.end(); }
		inline const_iterator end() const noexcept { return sections.end(); }
		inline iterator at(size_type pos) { return sections.data(pos); }
		inline const_iterator at(size_type pos) const { return sections.data(pos); }
		inline bool empty() const { return sections.empty(); }
		inline size_type size() const { return sections.size(); }
		inline u32 get_exists_count() const { return exists_count; }
		inline u32 get_locked_count() const { return locked_count; }
		inline u32 get_unreleased_count() const { return unreleased_count; }

		/**
		 * Utilities
		 */
		ranged_storage_type& get_storage() const
		{
			AUDIT(m_storage != nullptr);
			return *m_storage;
		}

		texture_cache_type& get_texture_cache() const
		{
			return get_storage().get_texture_cache();
		}

		inline section_storage_type& create_section()
		{
			auto& res = sections.emplace_back(this);
			return res;
		}

		inline void clear()
		{
			for (auto& section : *this)
			{
				if (section.is_locked())
					section.unprotect();

				section.destroy();
			}

			AUDIT(exists_count == 0);
			AUDIT(unreleased_count == 0);
			AUDIT(locked_count == 0);
			sections.clear();
		}

		inline bool is_first_block() const
		{
			return index == 0;
		}

		inline bool is_last_block() const
		{
			return index == num_blocks - 1;
		}

		inline ranged_storage_block* prev_block() const
		{
			if (is_first_block()) return nullptr;
			return &get_storage()[index - 1];
		}

		inline ranged_storage_block* next_block() const
		{
			if (is_last_block()) return nullptr;
			return &get_storage()[index + 1];
		}

		// Address range
		inline const address_range& get_range() const { return range; }
		inline u32 get_start() const { return range.start; }
		inline u32 get_end() const { return range.end; }
		inline u32 get_index() const { return index; }
		inline bool overlaps(const section_storage_type& section, section_bounds bounds = full_range) const { return section.overlaps(range, bounds); }
		inline bool overlaps(const address_range& _range) const { return range.overlaps(_range); }

		/**
		 * Section callbacks
		 */
		inline void on_section_protected(const section_storage_type& section)
		{
			(void)section; // silence unused warning without _AUDIT
			AUDIT(section.is_locked());
			locked_count++;
		}

		inline void on_section_unprotected(const section_storage_type& section)
		{
			(void)section; // silence unused warning without _AUDIT
			AUDIT(!section.is_locked());
			u32 prev_locked = locked_count--;
			ASSERT(prev_locked > 0);
		}

		inline void on_section_range_valid(section_storage_type& section)
		{
			AUDIT(section.valid_range());
			AUDIT(range.overlaps(section.get_section_base()));
			add_owned_section_overlaps(section);
		}

		inline void on_section_range_invalid(section_storage_type& section)
		{
			AUDIT(section.valid_range());
			AUDIT(range.overlaps(section.get_section_base()));
			remove_owned_section_overlaps(section);
		}

		inline void on_section_resources_created(const section_storage_type& section)
		{
			(void)section; // silence unused warning without _AUDIT
			AUDIT(section.exists());

			u32 prev_exists = exists_count++;

			if (prev_exists == 0)
			{
				m_storage->on_ranged_block_first_section_created(*this);
			}
		}

		inline void on_section_resources_destroyed(const section_storage_type& section)
		{
			(void)section; // silence unused warning without _AUDIT
			AUDIT(!section.exists());

			u32 prev_exists = exists_count--;
			ASSERT(prev_exists > 0);

			if (prev_exists == 1)
			{
				m_storage->on_ranged_block_last_section_destroyed(*this);
			}
		}

		void on_section_released(const section_storage_type& /*section*/)
		{
			u32 prev_unreleased = unreleased_count--;
			ASSERT(prev_unreleased > 0);
		}

		void on_section_unreleased(const section_storage_type& /*section*/)
		{
			unreleased_count++;
		}

		/**
		 * Overlapping sections
		 */
		inline bool contains_unowned(section_storage_type& section) const
		{
			return (unowned.find(&section) != unowned.end());
		}

		inline void add_unowned_section(section_storage_type& section)
		{
			AUDIT(overlaps(section));
			AUDIT(section.get_section_base() < range.start);
			AUDIT(!contains_unowned(section));
			unowned.insert(&section);
		}

		inline void remove_unowned_section(section_storage_type& section)
		{
			AUDIT(overlaps(section));
			AUDIT(section.get_section_base() < range.start);
			AUDIT(contains_unowned(section));
			unowned.erase(&section);
		}

		inline unowned_iterator unowned_begin() { return unowned.begin(); }
		inline unowned_const_iterator unowned_begin() const { return unowned.begin(); }
		inline unowned_iterator unowned_end() { return unowned.end(); }
		inline unowned_const_iterator unowned_end() const { return unowned.end(); }
		inline bool unowned_empty() const { return unowned.empty(); }
	};

	template <typename traits>
	class ranged_storage
	{
	public:
		static constexpr u32 block_size = 0x100'0000;
		static_assert(block_size % 4096u == 0, "block_size must be a multiple of the page size");
		static constexpr u32 num_blocks = u32{0x1'0000'0000ull / block_size};
		static_assert((num_blocks > 0) && (u64{num_blocks} * block_size == 0x1'0000'0000ull), "Invalid block_size/num_blocks");
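
		// For reference (derived from the constants above): block_size is 16 MiB, so the 4 GiB
		// address space is covered by num_blocks = 256 blocks, and an address `addr` belongs to
		// blocks[addr / block_size] (see block_for() below).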

		using section_storage_type = typename traits::section_storage_type;
		using texture_cache_type = typename traits::texture_cache_base_type;
		using block_type = ranged_storage_block<ranged_storage>;

	private:
		block_type blocks[num_blocks];
		texture_cache_type* m_tex_cache;
		std::unordered_set<block_type*> m_in_use;
		bool m_purging = false;

	public:
		std::atomic<u32> m_unreleased_texture_objects = {0}; // Number of invalidated objects not yet freed from memory
		std::atomic<u64> m_texture_memory_in_use = {0};

		// Constructor
		ranged_storage(texture_cache_type* tex_cache) :
			m_tex_cache(tex_cache)
		{
			// Initialize blocks
			for (u32 i = 0; i < num_blocks; i++)
			{
				blocks[i].initialize(i, this);
			}
		}

		/**
		 * Iterators
		 */
		constexpr auto begin() { return std::begin(blocks); }
		constexpr auto begin() const { return std::begin(blocks); }
		constexpr auto end() { return std::end(blocks); }
		constexpr auto end() const { return std::end(blocks); }

		/**
		 * Utilities
		 */
		inline block_type& block_for(u32 address)
		{
			return blocks[address / block_size];
		}

		inline const block_type& block_for(u32 address) const
		{
			return blocks[address / block_size];
		}

		inline block_type& block_for(const address_range& range)
		{
			AUDIT(range.valid());
			return block_for(range.start);
		}

		inline block_type& block_for(const section_storage_type& section)
		{
			return block_for(section.get_section_base());
		}

		inline block_type& operator[](size_t pos)
		{
			AUDIT(pos < num_blocks);
			return blocks[pos];
		}

		inline texture_cache_type& get_texture_cache() const
		{
			AUDIT(m_tex_cache != nullptr);
			return *m_tex_cache;
		}

		/**
		 * Blocks
		 */
		void clear()
		{
			for (auto& block : *this)
			{
				block.clear();
			}

			m_in_use.clear();

			AUDIT(m_unreleased_texture_objects == 0);
			AUDIT(m_texture_memory_in_use == 0);
		}

		void purge_unreleased_sections()
		{
			// We will be iterating through m_in_use, so do not allow the callbacks to touch it
			// (that would invalidate the iterator)
			m_purging = true;

			// Reclaims all graphics memory consumed by dirty textures
			for (auto it = m_in_use.begin(); it != m_in_use.end();)
			{
				auto* block = *it;

				if (block->get_unreleased_count() > 0)
				{
					for (auto& tex : *block)
					{
						if (!tex.is_unreleased())
							continue;

						ASSERT(!tex.is_locked());
						tex.destroy();
					}
				}

				if (block->get_exists_count() == 0)
				{
					it = m_in_use.erase(it);
				}
				else
				{
					it++;
				}
			}

			m_purging = false;
			AUDIT(m_unreleased_texture_objects == 0);
		}

		/**
		 * Callbacks
		 */
		void on_section_released(const section_storage_type& /*section*/)
		{
			u32 prev_unreleased = m_unreleased_texture_objects--;
			ASSERT(prev_unreleased > 0);
		}

		void on_section_unreleased(const section_storage_type& /*section*/)
		{
			m_unreleased_texture_objects++;
		}

		void on_section_resources_created(const section_storage_type& section)
		{
			m_texture_memory_in_use += section.get_section_size();
		}

		void on_section_resources_destroyed(const section_storage_type& section)
		{
			u64 size = section.get_section_size();
			u64 prev_size = m_texture_memory_in_use.fetch_sub(size);
			ASSERT(prev_size >= size);
		}

		void on_ranged_block_first_section_created(block_type& block)
		{
			AUDIT(!m_purging);
			AUDIT(m_in_use.find(&block) == m_in_use.end());
			m_in_use.insert(&block);
		}

		void on_ranged_block_last_section_destroyed(block_type& block)
		{
			if (m_purging)
				return;

			AUDIT(m_in_use.find(&block) != m_in_use.end());
			m_in_use.erase(&block);
		}

		/**
		 * Ranged Iterator
		 */
		template <typename T, typename unowned_iterator, typename section_iterator, typename block_type, typename parent_type>
		class range_iterator_tmpl
		{
		public:
			// Traits
			using value_type = T;
			using pointer = T*;
			using difference_type = int;
			using reference = T&;
			using iterator_category = std::forward_iterator_tag;

			// Constructors
			range_iterator_tmpl() = default; // end iterator
			explicit range_iterator_tmpl(parent_type& storage, const address_range& _range, section_bounds _bounds, bool _locked_only) :
				range(_range),
				bounds(_bounds),
				block(&storage.block_for(range.start)),
				unowned_it(block->unowned_begin()),
				unowned_remaining(true),
				cur_block_it(block->begin()),
				locked_only(_locked_only)
			{
				// do a "fake" iteration to ensure the internal state is consistent
				next(false);
			}

		private:
			// Members
			address_range range;
			section_bounds bounds;

			block_type* block = nullptr;
			bool needs_overlap_check = true;
			bool unowned_remaining = false;
			unowned_iterator unowned_it = {};
			section_iterator cur_block_it = {};
			pointer obj = nullptr;
			bool locked_only = false;

			inline void next(bool iterate = true)
			{
				AUDIT(block != nullptr);

				if (unowned_remaining)
				{
					do
					{
						// Still have "unowned" sections from blocks before the range to loop through
						auto blk_end = block->unowned_end();
						if (iterate && unowned_it != blk_end)
						{
							++unowned_it;
						}

						if (unowned_it != blk_end)
						{
							obj = *unowned_it;
							if (obj->valid_range() && (!locked_only || obj->is_locked()) && obj->overlaps(range, bounds))
								return;

							iterate = true;
							continue;
						}

						// No more unowned sections remaining
						unowned_remaining = false;
						iterate = false;
						break;

					} while (true);
				}

				// Go to next block
				do
				{
					// Iterate current block
					do
					{
						auto blk_end = block->end();
						if (iterate && cur_block_it != blk_end)
						{
							++cur_block_it;
						}

						if (cur_block_it != blk_end)
						{
							obj = &(*cur_block_it);
							if (obj->valid_range() && (!locked_only || obj->is_locked()) && (!needs_overlap_check || obj->overlaps(range, bounds)))
								return;

							iterate = true;
							continue;
						}

						break;

					} while (true);

					// Move to next block(s)
					do
					{
						block = block->next_block();
						if (block == nullptr || block->get_start() > range.end) // Reached end
						{
							block = nullptr;
							obj = nullptr;
							return;
						}

						needs_overlap_check = (block->get_end() > range.end);
						cur_block_it = block->begin();
						iterate = false;
					} while (locked_only && block->get_locked_count() == 0); // find a block with locked sections

				} while (true);
			}

		public:
			inline reference operator*() const { return *obj; }
			inline pointer operator->() const { return obj; }
			inline reference operator++() { next(); return *obj; }
			inline reference operator++(int) { auto* ptr = obj; next(); return *ptr; }
			inline bool operator==(const range_iterator_tmpl& rhs) const { return obj == rhs.obj && unowned_remaining == rhs.unowned_remaining; }
			inline bool operator!=(const range_iterator_tmpl& rhs) const { return !operator==(rhs); }

			inline void set_end(u32 new_end)
			{
				range.end = new_end;

				// If we've exceeded the new end, invalidate iterator
				if (block->get_start() > range.end)
				{
					block = nullptr;
				}
			}

			inline block_type& get_block() const
			{
				AUDIT(block != nullptr);
				return *block;
			}

			inline section_bounds get_bounds() const
			{
				return bounds;
			}
		};

		using range_iterator = range_iterator_tmpl<section_storage_type, typename block_type::unowned_iterator, typename block_type::iterator, block_type, ranged_storage>;
		using range_const_iterator = range_iterator_tmpl<const section_storage_type, typename block_type::unowned_const_iterator, typename block_type::const_iterator, const block_type, const ranged_storage>;

		inline range_iterator range_begin(const address_range& range, section_bounds bounds, bool locked_only = false)
		{
			return range_iterator(*this, range, bounds, locked_only);
		}

		inline range_const_iterator range_begin(const address_range& range, section_bounds bounds, bool locked_only = false) const
		{
			return range_const_iterator(*this, range, bounds, locked_only);
		}

		inline range_const_iterator range_begin(u32 address, section_bounds bounds, bool locked_only = false) const
		{
			return range_const_iterator(*this, address_range::start_length(address, 1), bounds, locked_only);
		}

		constexpr range_iterator range_end()
		{
			return range_iterator();
		}

		constexpr range_const_iterator range_end() const
		{
			return range_const_iterator();
		}
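
		// Typical traversal (illustrative; names are placeholders): visit every locked section
		// overlapping `range`:
		//   for (auto it = storage.range_begin(range, full_range, true); it != storage.range_end(); ++it)
		//   {
		//       auto& section = *it;
		//       ...
		//   }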

		/**
		 * Debug
		 */
#ifdef TEXTURE_CACHE_DEBUG
		void verify_protection(bool recount = false)
		{
			if (recount)
			{
				// Reset calculated part of the page_info struct
				tex_cache_checker.reset_refcount();

				// Go through all blocks and update calculated values
				for (auto& block : *this)
				{
					for (auto& tex : block)
					{
						if (tex.is_locked())
						{
							tex_cache_checker.add(tex.get_locked_range(), tex.get_protection());
						}
					}
				}
			}

			// Verify
			tex_cache_checker.verify();
		}
#endif //TEXTURE_CACHE_DEBUG
	};

	/**
	 * Cached Texture Section
	 */
	template <typename derived_type, typename traits>
	class cached_texture_section : public rsx::buffered_section
	{
	public:
		using ranged_storage_type = ranged_storage<traits>;
		using ranged_storage_block_type = ranged_storage_block<ranged_storage_type>;
		using texture_cache_type = typename traits::texture_cache_base_type;
		using predictor_type = texture_cache_predictor<traits>;
		using predictor_key_type = typename predictor_type::key_type;
		using predictor_entry_type = typename predictor_type::mapped_type;

	protected:
		ranged_storage_type* m_storage = nullptr;
		ranged_storage_block_type* m_block = nullptr;
		texture_cache_type* m_tex_cache = nullptr;

	private:
		constexpr derived_type* derived()
		{
			return static_cast<derived_type*>(this);
		}

		constexpr const derived_type* derived() const
		{
			return static_cast<const derived_type*>(this);
		}

		bool dirty = true;
		bool triggered_exists_callbacks = false;
		bool triggered_unreleased_callbacks = false;

	protected:
		u16 width;
		u16 height;
		u16 depth;
		u16 mipmaps;

		u16 real_pitch;
		u16 rsx_pitch;

		u32 gcm_format = 0;
		bool pack_unpack_swap_bytes = false;

		u64 sync_timestamp = 0;
		bool synchronized = false;
		bool flushed = false;
		bool speculatively_flushed = false;

		rsx::memory_read_flags readback_behaviour = rsx::memory_read_flags::flush_once;
		rsx::texture_create_flags view_flags = rsx::texture_create_flags::default_component_order;
		rsx::texture_upload_context context = rsx::texture_upload_context::shader_read;
		rsx::texture_dimension_extended image_type = rsx::texture_dimension_extended::texture_dimension_2d;

		address_range_vector flush_exclusions; // Address ranges that will be skipped during flush

		predictor_type* m_predictor = nullptr;
		size_t m_predictor_key_hash = 0;
		predictor_entry_type* m_predictor_entry = nullptr;

	public:
		u64 cache_tag = 0;
		u64 last_write_tag = 0;

		~cached_texture_section()
		{
			AUDIT(!exists());
		}

		cached_texture_section() = default;
		cached_texture_section(ranged_storage_block_type* block)
		{
			initialize(block);
		}

		void initialize(ranged_storage_block_type* block)
		{
			verify(HERE), m_block == nullptr && m_tex_cache == nullptr && m_storage == nullptr;
			m_block = block;
			m_storage = &block->get_storage();
			m_tex_cache = &block->get_texture_cache();
			m_predictor = &m_tex_cache->get_predictor();

			update_unreleased();
		}

		/**
		 * Reset
		 */
		void reset(const address_range& memory_range)
		{
			AUDIT(memory_range.valid());
			AUDIT(!is_locked());

			// Destroy if necessary
			destroy();

			// Superclass
			rsx::buffered_section::reset(memory_range);

			// Reset member variables to the default
			width = 0;
			height = 0;
			depth = 0;
			mipmaps = 0;

			real_pitch = 0;
			rsx_pitch = 0;

			gcm_format = 0;
			pack_unpack_swap_bytes = false;

			sync_timestamp = 0ull;
			synchronized = false;
			flushed = false;
			speculatively_flushed = false;

			cache_tag = 0ull;
			last_write_tag = 0ull;

			m_predictor_entry = nullptr;

			readback_behaviour = rsx::memory_read_flags::flush_once;
			view_flags = rsx::texture_create_flags::default_component_order;
			context = rsx::texture_upload_context::shader_read;
			image_type = rsx::texture_dimension_extended::texture_dimension_2d;

			flush_exclusions.clear();

			// Set to dirty
			set_dirty(true);

			// Notify that our CPU range is now valid
			notify_range_valid();
		}

		/**
		 * Destroyed Flag
		 */
		inline bool is_destroyed() const { return !exists(); } // this section is currently destroyed

	protected:
		void on_section_resources_created()
		{
			AUDIT(exists());
			AUDIT(valid_range());

			if (triggered_exists_callbacks) return;
			triggered_exists_callbacks = true;

			// Callbacks
			m_block->on_section_resources_created(*derived());
			m_storage->on_section_resources_created(*derived());
		}

		void on_section_resources_destroyed()
		{
			if (!triggered_exists_callbacks) return;
			triggered_exists_callbacks = false;

			AUDIT(valid_range());
			ASSERT(!is_locked());
			ASSERT(is_managed());

			// Set dirty
			set_dirty(true);

			// Trigger callbacks
			m_block->on_section_resources_destroyed(*derived());
			m_storage->on_section_resources_destroyed(*derived());

			// Invalidate range
			invalidate_range();
		}

		virtual void dma_abort()
		{}

	public:
		/**
		 * Dirty / Unreleased Flag
		 */
		inline bool is_dirty() const { return dirty; } // this section is dirty and will need to be reuploaded

		void set_dirty(bool new_dirty)
		{
			if (new_dirty == false && !is_locked() && context == texture_upload_context::shader_read)
				return;

			dirty = new_dirty;

			AUDIT(dirty || (!dirty && exists()));

			update_unreleased();
		}

	private:
		void update_unreleased()
		{
			bool unreleased = is_unreleased();

			if (unreleased && !triggered_unreleased_callbacks)
			{
				triggered_unreleased_callbacks = true;
				m_block->on_section_unreleased(*derived());
				m_storage->on_section_unreleased(*derived());
			}
			else if (!unreleased && triggered_unreleased_callbacks)
			{
				triggered_unreleased_callbacks = false;
				m_block->on_section_released(*derived());
				m_storage->on_section_released(*derived());
			}
		}

		/**
		 * Valid Range
		 */
		void notify_range_valid()
		{
			AUDIT(valid_range());

			// Callbacks
			m_block->on_section_range_valid(*derived());
			//m_storage->on_section_range_valid(*derived());

			// Reset texture_cache m_flush_always_cache
			if (readback_behaviour == memory_read_flags::flush_always)
			{
				m_tex_cache->on_memory_read_flags_changed(*derived(), memory_read_flags::flush_always);
			}
		}

		void invalidate_range()
		{
			if (!valid_range())
				return;

			// Reset texture_cache m_flush_always_cache
			if (readback_behaviour == memory_read_flags::flush_always)
			{
				m_tex_cache->on_memory_read_flags_changed(*derived(), memory_read_flags::flush_once);
			}

			// Notify the storage block that we are now invalid
			m_block->on_section_range_invalid(*derived());
			//m_storage->on_section_range_invalid(*derived());

			m_predictor_entry = nullptr;
			speculatively_flushed = false;
			buffered_section::invalidate_range();
		}

	public:
		/**
		 * Misc.
		 */
		bool is_unreleased() const
		{
			return exists() && is_dirty() && !is_locked();
		}

		bool can_be_reused() const
		{
			return !exists() || (is_dirty() && !is_locked());
		}

		bool is_flushable() const
		{
			// This section is active and can be flushed to cpu
			return (get_protection() == utils::protection::no);
		}

	private:
		/**
		 * Protection
		 */
		void post_protect(utils::protection old_prot, utils::protection prot)
		{
			if (old_prot != utils::protection::rw && prot == utils::protection::rw)
			{
				AUDIT(!is_locked());

				m_block->on_section_unprotected(*derived());

				// Blit and framebuffers may be unprotected and clean
				if (context == texture_upload_context::shader_read)
				{
					set_dirty(true);
				}
			}
			else if (old_prot == utils::protection::rw && prot != utils::protection::rw)
			{
				AUDIT(is_locked());

				m_block->on_section_protected(*derived());

				set_dirty(false);
			}
		}

	public:
		inline void protect(utils::protection prot)
		{
			utils::protection old_prot = get_protection();
			rsx::buffered_section::protect(prot);
			post_protect(old_prot, prot);
		}

		inline void protect(utils::protection prot, const std::pair<u32, u32>& range_confirm)
		{
			utils::protection old_prot = get_protection();
			rsx::buffered_section::protect(prot, range_confirm);
			post_protect(old_prot, prot);
		}

		inline void unprotect()
		{
			utils::protection old_prot = get_protection();
			rsx::buffered_section::unprotect();
			post_protect(old_prot, utils::protection::rw);
		}

		inline void discard(bool set_dirty = true)
		{
			utils::protection old_prot = get_protection();
			rsx::buffered_section::discard();
			post_protect(old_prot, utils::protection::rw);

			if (set_dirty)
			{
				this->set_dirty(true);
			}
		}

		void reprotect(const utils::protection prot)
		{
			if (synchronized && !flushed)
			{
				// Abort enqueued transfer
				dma_abort();
			}

			// Reset properties and protect again
			flushed = false;
			synchronized = false;
			sync_timestamp = 0ull;

			protect(prot);
		}

		void reprotect(const utils::protection prot, const std::pair<u32, u32>& range)
		{
			if (synchronized && !flushed)
			{
				// Abort enqueued transfer
				dma_abort();
			}

			// Reset properties and protect again
			flushed = false;
			synchronized = false;
			sync_timestamp = 0ull;

			protect(prot, range);
		}

		/**
		 * Prediction
		 */
		bool tracked_by_predictor() const
		{
			// Predictor statistics are not updated for shader_read contexts or flush_always sections
			return get_context() != texture_upload_context::shader_read && get_memory_read_flags() != memory_read_flags::flush_always;
		}

		void on_flush()
		{
			speculatively_flushed = false;

			m_tex_cache->on_flush();

			if (tracked_by_predictor())
			{
				get_predictor_entry().on_flush();
			}

			flush_exclusions.clear();
		}

		void on_speculative_flush()
		{
			speculatively_flushed = true;

			m_tex_cache->on_speculative_flush();
		}

		void on_miss()
		{
			LOG_WARNING(RSX, "Cache miss at address 0x%X. This is gonna hurt...", get_section_base());
			m_tex_cache->on_miss(*derived());
		}

		void touch(u64 tag)
		{
			last_write_tag = tag;

			if (tracked_by_predictor())
			{
				get_predictor_entry().on_write(speculatively_flushed);
			}

			if (speculatively_flushed)
			{
				m_tex_cache->on_misprediction();
			}

			flush_exclusions.clear();
		}

		/**
		 * Flush
		 */
	private:
		void imp_flush_memcpy(u32 vm_dst, u8* src, u32 len) const
		{
			u8* dst = get_ptr<u8>(vm_dst);

			address_range copy_range = address_range::start_length(vm_dst, len);

			if (flush_exclusions.empty() || !copy_range.overlaps(flush_exclusions))
			{
				// Normal case: no flush exclusions, or no overlap
				memcpy(dst, src, len);
				return;
			}
			else if (copy_range.inside(flush_exclusions))
			{
				// Nothing to copy
				return;
			}

			// Otherwise, we need to filter the memcpy with our flush exclusions
			// Should be relatively rare
			address_range_vector vec;
			vec.merge(copy_range);
			vec.exclude(flush_exclusions);

			for (const auto& rng : vec)
			{
				if (!rng.valid())
					continue;

				AUDIT(rng.inside(copy_range));
				u32 offset = rng.start - vm_dst;
				memcpy(dst + offset, src + offset, rng.length());
			}
		}
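
		// Worked example (numbers are illustrative): flushing [0x1000, 0x2000) with an exclusion of
		// [0x1400, 0x1800) performs two copies, [0x1000, 0x1400) and [0x1800, 0x2000), leaving the
		// excluded bytes in guest memory untouched.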

		void imp_flush()
		{
			AUDIT(synchronized);

			ASSERT(real_pitch > 0);

			// Calculate valid range
			const auto valid_range = get_confirmed_range();
			AUDIT(valid_range.valid());
			const auto valid_length = valid_range.length();
			const auto valid_offset = valid_range.start - get_section_base();
			AUDIT(valid_length > 0);

			// In case of pitch mismatch, match the offset point to the correct point
			u32 mapped_offset, mapped_length;
			if (real_pitch != rsx_pitch)
			{
				if (LIKELY(!valid_offset))
				{
					mapped_offset = 0;
				}
				else
				{
					// Map the linear offset in guest memory (rsx_pitch) onto the tightly packed source (real_pitch)
					const u32 offset_in_x = valid_offset % rsx_pitch;
					const u32 offset_in_y = valid_offset / rsx_pitch;
					mapped_offset = (offset_in_y * real_pitch) + offset_in_x;
				}

				const u32 available_vmem = (get_section_size() / rsx_pitch) * real_pitch + std::min<u32>(get_section_size() % rsx_pitch, real_pitch);
				mapped_length = std::min(available_vmem - mapped_offset, valid_length);
			}
			else
			{
				mapped_offset = valid_offset;
				mapped_length = valid_length;
			}
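
			// Example (illustrative): with rsx_pitch = 256 and real_pitch = 128, valid_offset = 520
			// lies at row 2, column 8 of the guest layout and maps to 2 * 128 + 8 = 264 bytes into
			// the tightly packed source data.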

			// Obtain pointers to the source and destination memory regions
			u8* src = static_cast<u8*>(derived()->map_synchronized(mapped_offset, mapped_length));
			u32 dst = valid_range.start;
			ASSERT(src != nullptr);

			// Copy from src to dst
			if (real_pitch >= rsx_pitch || valid_length <= rsx_pitch)
			{
				imp_flush_memcpy(dst, src, valid_length);
			}
			else
			{
				u8* _src = src;
				u32 _dst = dst;

				const auto num_exclusions = flush_exclusions.size();
				if (num_exclusions > 0)
				{
					LOG_WARNING(RSX, "Slow imp_flush path triggered with non-empty flush_exclusions (%d exclusions, %d bytes), performance might suffer", num_exclusions, valid_length);
				}

				for (s32 remaining = s32(valid_length); remaining > 0; remaining -= rsx_pitch)
				{
					imp_flush_memcpy(_dst, _src, real_pitch);
					_src += real_pitch;
					_dst += rsx_pitch;
				}
			}
		}

	public:
		// Flush the section contents back to guest memory
		void flush()
		{
			if (flushed) return;

			// Sanity checks
			ASSERT(exists());
			AUDIT(is_locked());

			// If we are fully inside the flush exclusions regions, we just mark ourselves as flushed and return
			if (get_confirmed_range().inside(flush_exclusions))
			{
				flushed = true;
				flush_exclusions.clear();
				on_flush();
				return;
			}

			// NOTE: Hard faults should have been pre-processed beforehand
			ASSERT(synchronized);

			// Copy flush result to guest memory
			imp_flush();

			// Finish up
			// It is highly likely that this surface will be reused, so we just leave the resources in place
			flushed = true;
			derived()->finish_flush();
			flush_exclusions.clear();
			on_flush();
		}

		void add_flush_exclusion(const address_range& rng)
		{
			AUDIT(exists() && is_locked() && is_flushable());
			const auto _rng = rng.get_intersect(get_section_range());
			flush_exclusions.merge(_rng);
		}
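
		// Illustrative use: a caller that has already committed part of this section through
		// another path (e.g. an FBO write, see superseded_by_fbo) can exclude that sub-range so a
		// later flush() will not overwrite the newer data:
		//   section.add_flush_exclusion(updated_range); // `updated_range` is hypothetical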

		/**
		 * Misc
		 */
	public:
		predictor_entry_type& get_predictor_entry()
		{
			// If we don't have a predictor entry, or the key has changed, (re-)fetch the entry from the predictor
			if (m_predictor_entry == nullptr || !m_predictor_entry->key_matches(*derived()))
			{
				m_predictor_entry = &((*m_predictor)[*derived()]);
			}

			return *m_predictor_entry;
		}

		void set_view_flags(rsx::texture_create_flags flags)
		{
			view_flags = flags;
		}

		void set_context(rsx::texture_upload_context upload_context)
		{
			AUDIT(!exists() || !is_locked() || context == upload_context);
			context = upload_context;
		}

		void set_image_type(rsx::texture_dimension_extended type)
		{
			image_type = type;
		}

		void set_gcm_format(u32 format)
		{
			gcm_format = format;
		}

		void set_memory_read_flags(memory_read_flags flags, bool notify_texture_cache = true)
		{
			const bool changed = (flags != readback_behaviour);
			readback_behaviour = flags;

			if (notify_texture_cache && changed && valid_range())
			{
				m_tex_cache->on_memory_read_flags_changed(*derived(), flags);
			}
		}

		u16 get_width() const
		{
			return width;
		}

		u16 get_height() const
		{
			return height;
		}

		u16 get_depth() const
		{
			return depth;
		}

		u16 get_mipmaps() const
		{
			return mipmaps;
		}

		u16 get_rsx_pitch() const
		{
			return rsx_pitch;
		}

		rsx::texture_create_flags get_view_flags() const
		{
			return view_flags;
		}

		rsx::texture_upload_context get_context() const
		{
			return context;
		}

		rsx::section_bounds get_overlap_test_bounds() const
		{
			if (guard_policy == protection_policy::protect_policy_full_range)
				return rsx::section_bounds::locked_range;

			const bool strict_range_check = g_cfg.video.write_color_buffers || g_cfg.video.write_depth_buffer;
			return (strict_range_check || get_context() == rsx::texture_upload_context::blit_engine_dst) ?
				rsx::section_bounds::confirmed_range :
				rsx::section_bounds::locked_range;
		}

		rsx::texture_dimension_extended get_image_type() const
		{
			return image_type;
		}

		u32 get_gcm_format() const
		{
			return gcm_format;
		}

		memory_read_flags get_memory_read_flags() const
		{
			return readback_behaviour;
		}

		u64 get_sync_timestamp() const
		{
			return sync_timestamp;
		}

		/**
		 * Comparison
		 */
		inline bool matches(const address_range& memory_range)
		{
			return valid_range() && rsx::buffered_section::matches(memory_range);
		}

		bool matches_dimensions(u32 width, u32 height, u32 depth, u32 mipmaps)
		{
			if (!valid_range())
				return false;

			if (!width && !height && !depth && !mipmaps)
				return true;

			if (width && width != this->width)
				return false;

			if (height && height != this->height)
				return false;

			if (depth && depth != this->depth)
				return false;

			if (mipmaps && mipmaps > this->mipmaps)
				return false;

			return true;
		}

		bool matches(u32 rsx_address, u32 format, u32 width, u32 height, u32 depth, u32 mipmaps)
		{
			if (!valid_range())
				return false;

			if (rsx_address != get_section_base())
				return false;

			// All requested format bits must be present in our gcm_format
			if ((gcm_format & format) != format)
				return false;

			return matches_dimensions(width, height, depth, mipmaps);
		}

		bool matches(const address_range& memory_range, u32 format, u32 width, u32 height, u32 depth, u32 mipmaps)
		{
			if (!valid_range())
				return false;

			if (!rsx::buffered_section::matches(memory_range))
				return false;

			// All requested format bits must be present in our gcm_format
			if ((gcm_format & format) != format)
				return false;

			return matches_dimensions(width, height, depth, mipmaps);
		}

		/**
		 * Derived wrappers
		 */
		void destroy()
		{
			derived()->destroy();
		}

		bool is_managed() const
		{
			return derived()->is_managed();
		}

		bool exists() const
		{
			return derived()->exists();
		}
	};
} // namespace rsx